omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_nodes_01.py
"""Action Graph Node Tests, Part 1"""
import time
import carb
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.app
import omni.kit.test
import omni.usd
from omni.graph.core import ThreadsafetyTestUtils
from pxr import Gf, OmniGraphSchemaTools, Sdf, Vt
# ======================================================================
class TestActionGraphNodes(ogts.OmniGraphTestCase):
"""Tests action graph node functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
keys = og.Controller.Keys
E = og.ExecutionAttributeState.ENABLED
D = og.ExecutionAttributeState.DISABLED
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
# -----------------------------------------------------------------------
async def test_addprimrelationship_node(self):
"""Test AddPrimRelationship node"""
# Check that we can add a relationship to a prim and get that relationship.
#
# +---------+ +---------------------+
# | ONTICK +-->| AddPrimRelationship +
# +---------+ +---------------------+
#
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("AddRel", "omni.graph.action.AddPrimRelationship"),
],
self.keys.CREATE_PRIMS: [
("/Test", {}),
("/Target", {}),
],
self.keys.CONNECT: [("OnTick.outputs:tick", "AddRel.inputs:execIn")],
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("AddRel.inputs:path", "/Test"),
("AddRel.inputs:name", "rel"),
("AddRel.inputs:target", "/Target"),
],
},
)
prim = stage.GetPrimAtPath("/Test")
await og.Controller.evaluate()
rel = prim.GetRelationship("rel")
targets = rel.GetTargets()
self.assertEqual(len(targets), 1)
self.assertEqual(str(targets[0]), "/Target")
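# For reference, a minimal sketch (an assumption, not the node's actual
# implementation) of the equivalent plain-USD calls AddPrimRelationship is
# expected to perform under the hood:
def _add_prim_relationship_sketch(self, stage):
    """Hypothetical helper mirroring the node's effect with Usd/Sdf APIs."""
    prim = stage.GetPrimAtPath("/Test")
    rel = prim.CreateRelationship("rel")
    rel.AddTarget(Sdf.Path("/Target"))
    return rel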
# ----------------------------------------------------------------------
# The Branch node has a built-in test construct in its .ogn file located at ../../nodes/OgnBranch.ogn
# (relative to the source location of the currently-opened testing script) AND is used in other testing
# methods, so we skip adding extra node-specific tests for it here.
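# For illustration, a minimal sketch of how the Branch node is typically wired,
# mirroring its usage in test_forloop_node below:
def _branch_sketch(self, graph_path):
    """Hypothetical setup routing execution through Branch on a boolean condition."""
    og.Controller.edit(
        graph_path,
        {
            self.keys.CREATE_NODES: [
                ("OnTick", "omni.graph.action.OnTick"),
                ("Branch", "omni.graph.action.Branch"),
            ],
            self.keys.SET_VALUES: [("Branch.inputs:condition", True)],
            self.keys.CONNECT: [("OnTick.outputs:tick", "Branch.inputs:execIn")],
        },
    )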
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_counter_node(self, test_instance_id: int = 0):
"""Test Counter node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (ontick_node, counter_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: ("OnTick.outputs:tick", "Counter.inputs:execIn"),
},
)
# Obtain necessary attributes.
in_exec_attr = counter_node.get_attribute("inputs:execIn")
in_reset_attr = counter_node.get_attribute("inputs:reset")
state_cnt_attr = counter_node.get_attribute("state:count")
out_exec_attr = counter_node.get_attribute("outputs:execOut")
out_cnt_attr = counter_node.get_attribute("outputs:count")
out_tick_attr = ontick_node.get_attribute("outputs:tick")
# Check that the counter node gets correctly incremented when executing.
self.assertEqual(state_cnt_attr.get(), 0)
self.assertEqual(out_cnt_attr.get(), 0)
self.assertEqual(out_exec_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 1)
self.assertEqual(out_cnt_attr.get(), 1)
self.assertEqual(out_exec_attr.get(), self.E)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 2)
self.assertEqual(out_cnt_attr.get(), 2)
self.assertEqual(out_exec_attr.get(), self.E)
# Check that the counter node doesn't increment when not executing.
og.Controller.disconnect(
out_tick_attr,
in_exec_attr,
)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 2)
self.assertEqual(out_cnt_attr.get(), 2)
self.assertEqual(out_exec_attr.get(), self.E)
# Check that the reset flag for the Counter node instance works correctly when
# inputs:execIn is set to 0 (i.e. when the Counter node is NOT supposed to be
# executing).
og.Controller.connect(out_tick_attr, in_reset_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 0)
self.assertEqual(out_cnt_attr.get(), 0)
self.assertEqual(out_exec_attr.get(), self.E)
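# A tiny pure-Python model (an assumption, not the node's source) of the Counter
# semantics the assertions above rely on: each execIn increments the count, and
# reset zeroes it regardless of execIn.
class _CounterModel:
    """Hypothetical stand-in for omni.graph.action.Counter's state behavior."""
    def __init__(self):
        self.count = 0
    def exec_in(self):
        self.count += 1
    def reset(self):
        self.count = 0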
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_delay_node(self, test_instance_id: int = 0):
"""Test Delay node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (on_impulse_node, _, counter_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Delay", "omni.graph.action.Delay"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.CONNECT: [
("OnImpulse.outputs:execOut", "Delay.inputs:execIn"),
("Delay.outputs:finished", "Counter.inputs:execIn"),
],
self.keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("Delay.inputs:duration", 0.01),
],
},
)
# Obtain necessary attributes.
out_cnt_attr = counter_node.get_attribute("outputs:count")
state_enable_impulse_attr = on_impulse_node.get_attribute("state:enableImpulse")
# Trigger the graph(s) once.
state_enable_impulse_attr.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# Downstream execution is delayed, so the counter node won't be incremented.
self.assertEqual(out_cnt_attr.get(), 0)
# Wait to ensure that the delay finishes before checking that the counter node
# has indeed been incremented.
time.sleep(0.02)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
self.assertEqual(out_cnt_attr.get(), 1)
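# A rough sketch (an assumption, not the node's source) of the latent behavior
# relied on above: execIn puts Delay into a latent state, and outputs:finished
# fires on the first graph evaluation after inputs:duration seconds have elapsed.
async def _delay_model(self, duration):
    """Hypothetical model of the Delay node's latent wait."""
    import asyncio
    await asyncio.sleep(duration)
    return "finished"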
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_flipflop_node(self, test_instance_id: int = 0):
"""Test FlipFlop node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(graph, (_, flip_flop_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FlipFlop", "omni.graph.action.FlipFlop"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: (
"OnTick.outputs:tick",
"FlipFlop.inputs:execIn",
),
},
)
# Obtain necessary attributes.
out_a_attr = flip_flop_node.get_attribute("outputs:a")
out_b_attr = flip_flop_node.get_attribute("outputs:b")
out_isa_attr = flip_flop_node.get_attribute("outputs:isA")
# First eval, 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# Second eval 'b'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.D)
self.assertEqual(out_b_attr.get(), self.E)
self.assertFalse(out_isa_attr.get())
# Third eval 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# Test that non-usd-backed FlipFlop nodes correctly
# set their execution-type attributes.
# Make sure that the node paths are prefaced with the path to the
# graph that they should reside in (so that each node creation command
# can be processed to produce unique nodes for each test instance)!
_, on_tick_no_usd = og.cmds.CreateNode(
graph=graph,
node_path=f"{graph_path}/OnTickNoUSD",
node_type="omni.graph.action.OnTick",
create_usd=False,
)
_, flip_flop_no_usd = og.cmds.CreateNode(
graph=graph,
node_path=f"{graph_path}/FlipFlopNoUSD",
node_type="omni.graph.action.FlipFlop",
create_usd=False,
)
# Obtain necessary attributes.
out_a_attr = flip_flop_no_usd.get_attribute("outputs:a")
out_b_attr = flip_flop_no_usd.get_attribute("outputs:b")
out_isa_attr = flip_flop_no_usd.get_attribute("outputs:isA")
on_tick_no_usd.get_attribute("inputs:onlyPlayback").set(False)
# Make sure that the node attribute paths are prefaced with the graph
# path that they reside in (so that each instanced node attribute can
# be uniquely processed)!
og.cmds.ConnectAttrs(
src_attr=f"{graph_path}/OnTickNoUSD.outputs:tick",
dest_attr=f"{graph_path}/FlipFlopNoUSD.inputs:execIn",
modify_usd=False,
)
# First eval, 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# Second eval 'b'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.D)
self.assertEqual(out_b_attr.get(), self.E)
self.assertFalse(out_isa_attr.get())
# Third eval 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# ----------------------------------------------------------------------
# The ForEach node has a built-in test construct in its .ogn file located at ../../nodes/OgnForEach.ogn
# (relative to the source location of the currently-opened testing script), so we skip adding extra
# node-specific tests for it here.
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_forloop_node(self, test_instance_id: int = 0):
"""Test ForLoop node"""
context = omni.usd.get_context()
stage = context.get_stage()
# Since we want to use the same prim across all graph instances in the
# thread-safety test, we add it to the threading cache like so:
prim = ThreadsafetyTestUtils.add_to_threading_cache(test_instance_id, stage.DefinePrim("/World/TestPrim"))
ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id,
prim.CreateAttribute("val1", Sdf.ValueTypeNames.Int2, False).Set(Gf.Vec2i(1, 1)),
)
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (_, _, _, _, _, _, write_node, finish_counter), _, _) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Const", "omni.graph.nodes.ConstantInt2"),
("StopNum", "omni.graph.nodes.ConstantInt"),
("Add", "omni.graph.nodes.Add"),
("Branch", "omni.graph.action.Branch"),
("For", "omni.graph.action.ForLoop"),
("Write1", "omni.graph.nodes.WritePrimAttribute"),
("FinishCounter", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Const.inputs:value", [1, 2]),
("StopNum.inputs:value", 3),
("Write1.inputs:name", "val1"),
("Write1.inputs:primPath", "/World/TestPrim"),
("Write1.inputs:usePath", True),
("Branch.inputs:condition", True),
],
self.keys.CONNECT: [
("OnTick.outputs:tick", "For.inputs:execIn"),
("StopNum.inputs:value", "For.inputs:stop"),
("For.outputs:loopBody", "Branch.inputs:execIn"),
("For.outputs:finished", "FinishCounter.inputs:execIn"),
("Branch.outputs:execTrue", "Write1.inputs:execIn"),
("For.outputs:value", "Add.inputs:a"),
("Const.inputs:value", "Add.inputs:b"),
("Add.outputs:sum", "Write1.inputs:value"),
],
},
)
# Evaluate the graph(s).
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertListEqual([3, 4], list(stage.GetAttributeAtPath("/World/TestPrim.val1").Get()))
self.assertEqual(3, write_node.get_compute_count())
self.assertEqual(1, finish_counter.get_compute_count())
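# A small pure-Python model (an assumption, not the node's source) of the ForLoop
# semantics verified above: with stop=3, outputs:loopBody fires once per index in
# [0, stop) with outputs:value set to that index, then outputs:finished fires once.
def _forloop_model(self, stop):
    for value in range(stop):
        yield ("loopBody", value)
    yield ("finished", None)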
# This test makes sure that a ForEach loop writing arrays to different prims
# works as expected (OM-84129).
async def test_foreach_node_write_multiple_prim(self):
"""Test the foreach node writing arrays to output prims"""
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
(_, _, (prim1, prim2), _) = og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_PRIMS: [
("/World/Prim1", {"graph_output": ("Int[]", [])}),
("/World/Prim2", {"graph_output": ("Int[]", [])}),
],
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("For", "omni.graph.action.ForEach"),
("Write", "omni.graph.nodes.WritePrimAttribute"),
("MakeArray", "omni.graph.nodes.ConstructArray"),
],
self.keys.SET_VALUES: [
("For.inputs:arrayIn", {"type": "token[]", "value": ["/World/Prim1", "/World/Prim2"]}),
("OnTick.inputs:onlyPlayback", False),
("Write.inputs:name", "graph_output"),
("Write.inputs:usePath", True),
("Write.inputs:usdWriteBack", True),
("MakeArray.inputs:arraySize", 1),
],
self.keys.CONNECT: [
("OnTick.outputs:tick", "For.inputs:execIn"),
("For.outputs:loopBody", "Write.inputs:execIn"),
("For.outputs:element", "Write.inputs:primPath"),
("For.outputs:arrayIndex", "MakeArray.inputs:input0"),
("MakeArray.outputs:array", "Write.inputs:value"),
],
},
)
await omni.kit.app.get_app().next_update_async()
prim1_out = prim1.GetAttribute("graph_output")
prim2_out = prim2.GetAttribute("graph_output")
self.assertEqual(prim1_out.Get(), Vt.IntArray([0]))
self.assertEqual(prim2_out.Get(), Vt.IntArray([1]))
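# A small pure-Python model (an assumption, not the node's source) of the ForEach
# semantics verified above: outputs:loopBody fires once per element, publishing
# both outputs:element and outputs:arrayIndex for that iteration.
def _foreach_model(self, array_in):
    for index, element in enumerate(array_in):
        yield element, index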
# ----------------------------------------------------------------------
# The Gate node has a built-in test construct in its .ogn file located at ../../nodes/OgnGate.ogn
# (relative to the source location of the currently-opened testing script) AND is used in other
# testing methods, so we skip adding extra node-specific tests for it here.
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_multigate_node(self, test_instance_id: int = 0):
"""Test Multigate node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
(_, (_, multigate_node), _, _) = og.Controller.edit(
{"graph_path": graph_path, "evaluator_name": "execution"},
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Multigate", "omni.graph.action.Multigate"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: [
("OnTick.outputs:tick", "Multigate.inputs:execIn"),
],
},
)
# Add 5 extra outputs to the Multigate node.
for i in range(1, 6):
og.Controller.create_attribute(
multigate_node,
f"outputs:output{i}",
og.Type(og.BaseDataType.UINT, 1, 0, og.AttributeRole.EXECUTION),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT,
)
# Obtain necessary attributes.
out_attr_0 = multigate_node.get_attribute("outputs:output0")
out_attr_1 = multigate_node.get_attribute("outputs:output1")
out_attr_2 = multigate_node.get_attribute("outputs:output2")
out_attr_3 = multigate_node.get_attribute("outputs:output3")
out_attr_4 = multigate_node.get_attribute("outputs:output4")
out_attr_5 = multigate_node.get_attribute("outputs:output5")
# Check that the Multigate node correctly cycles through each of its outputs.
# Note that we trigger an execution through the Multigate node via the OnTick node,
# whose onlyPlayback input we've set to False in order to trigger an execution each
# time we evaluate the graph(s).
for i in range(0, 6):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 0) % 6}").get(), self.E)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 1) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 2) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 3) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 4) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 5) % 6}").get(), self.D)
# Next try removing some output attributes during evaluation and test if the
# Multigate node correctly cycles through.
for _ in range(4):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
self.assertEqual(out_attr_4.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
multigate_node.remove_attribute("outputs:output4")
# The Multigate node cycles back to 0 instead of going to 5 since it thinks that it's
# reached the end of its outputs list (i.e. it expects to jump from pin 3 to 4, but because
# there is no such pin it wraps back to 0 rather than continuing to 5).
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.E)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
# Further showing that executing 4 times brings us back to pin 0 rather than pin 5.
for _ in range(4):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.E)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
# Execute the graph(s) once, then remove the currently-enabled output pin. The Multigate node
# will think that it's reached the end of the outputs list and cycle back. Because we removed
# pin 1, it returns to pin 0 and stays there, never cycling through the other outputs, since it
# cannot make the jump from pin 0 to pin 2 (as mentioned previously; a small model of this
# cycling rule appears at the end of this test).
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
multigate_node.remove_attribute("outputs:output1")
for _ in range(3):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.E)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
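# A tiny pure-Python model (an assumption, not the node's source) of the cycling
# rule the assertions above depend on: advance to the next sequentially numbered
# output pin, and wrap back to pin 0 whenever that pin no longer exists.
def _next_multigate_pin(self, current, existing_pins):
    """Hypothetical pin-advance rule; explains why removed pins cause a wrap to 0."""
    nxt = current + 1
    return nxt if nxt in existing_pins else 0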
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_multisequence_node(self, test_instance_id: int = 0):
"""Test Multisequence node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(
_,
(on_tick_node, multisequence_node, counter0_node, counter1_node, counter2_node),
_,
_,
) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Multisequence", "omni.graph.action.Multisequence"),
("Counter0", "omni.graph.action.Counter"),
("Counter1", "omni.graph.action.Counter"),
("Counter2", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: [
("OnTick.outputs:tick", "Multisequence.inputs:execIn"),
],
},
)
# Add 3 extra outputs to the Multisequence node.
for j in range(1, 4):
og.Controller.create_attribute(
multisequence_node,
f"outputs:output{j}",
og.Type(og.BaseDataType.UINT, 1, 0, og.AttributeRole.EXECUTION),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT,
)
# Obtain necessary attributes.
out_attr_0 = multisequence_node.get_attribute("outputs:output0")
out_attr_1 = multisequence_node.get_attribute("outputs:output1")
out_attr_2 = multisequence_node.get_attribute("outputs:output2")
out_attr_3 = multisequence_node.get_attribute("outputs:output3")
in_exec_attr_0 = counter0_node.get_attribute("inputs:execIn")
in_exec_attr_1 = counter1_node.get_attribute("inputs:execIn")
in_exec_attr_2 = counter2_node.get_attribute("inputs:execIn")
out_cnt_attr_0 = counter0_node.get_attribute("outputs:count")
out_cnt_attr_1 = counter1_node.get_attribute("outputs:count")
out_cnt_attr_2 = counter2_node.get_attribute("outputs:count")
in_onlyplayback_attr = on_tick_node.get_attribute("inputs:onlyPlayback")
# Connect Multisequence node output attributes to the counter nodes.
og.Controller.connect(out_attr_0, in_exec_attr_0)
og.Controller.connect(out_attr_2, in_exec_attr_1)
og.Controller.connect(out_attr_1, in_exec_attr_2)
# Check that the Multisequence node correctly executes through its outputs when input
# execution is enabled via the OnTick node. This is done by checking whether
# each counter has been incremented by 1, and if the last output pin on the
# Multisequence node remains enabled (regardless of the fact that it's not connected
# downstream).
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 1)
self.assertEqual(out_cnt_attr_1.get(), 1)
self.assertEqual(out_cnt_attr_2.get(), 1)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
# Connect the Counter2 node to another Multisequence output pin.
og.Controller.connect(out_attr_3, in_exec_attr_2)
# Once again evaluate the graph(s). In this situation the Counter2 node should be incremented twice
# (since it's connected to 2 separate Multisequence output pins). Also Multisequence output pins
# 1 AND 3 should both be enabled by the end of the execution; this is because pin 3 would
# typically be the last output that gets enabled, but because pin 3 shares a downstream
# node with pin 1 (that node being Counter2), both outputs need to be enabled by the end.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 2)
self.assertEqual(out_cnt_attr_1.get(), 2)
self.assertEqual(out_cnt_attr_2.get(), 3)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.E)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
# Set the OnTick node to only trigger downstream execution when playback is enabled; check
# that in this situation the Multisequence node correctly skips executing through its outputs
# (i.e. that the Counter nodes don't get incremented). The state of the Multisequence's output
# pins should not have changed since the last graph evaluation.
in_onlyplayback_attr.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 2)
self.assertEqual(out_cnt_attr_1.get(), 2)
self.assertEqual(out_cnt_attr_2.get(), 3)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.E)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
# Try removing an output attribute from the Multisequence node and check that all other
# outputs that come before it in the list still get triggered. In this example we remove
# output2, which means that output3 won't get triggered at all (as evidenced by the fact
# that the Counter2 node only gets incremented once, by output1).
og.Controller.disconnect(out_attr_2, in_exec_attr_1)
multisequence_node.remove_attribute("outputs:output2")
in_onlyplayback_attr.set(False)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 3)
self.assertEqual(out_cnt_attr_1.get(), 2)
self.assertEqual(out_cnt_attr_2.get(), 4)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.E)
self.assertEqual(out_attr_3.get(), self.D)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_once_node(self, test_instance_id: int = 0):
"""Test Once node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (ontick_node, once_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Once", "omni.graph.action.Once"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: ("OnTick.outputs:tick", "Once.inputs:execIn"),
},
)
# Obtain necessary attributes.
in_exec_attr = once_node.get_attribute("inputs:execIn")
in_reset_attr = once_node.get_attribute("inputs:reset")
out_once_attr = once_node.get_attribute("outputs:once")
out_after_attr = once_node.get_attribute("outputs:after")
out_tick_attr = ontick_node.get_attribute("outputs:tick")
# Check that the Once node controls flow of execution by passing flow
# differently the first time it's executed compared to all subsequent
# executions.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.E)
self.assertEqual(out_after_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.E)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.E)
# Check that the reset flag works correctly when inputs:execIn is set to 0 (i.e. when
# the Once node is NOT supposed to be executing).
og.Controller.disconnect(out_tick_attr, in_exec_attr)
og.Controller.connect(out_tick_attr, in_reset_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.D)
og.Controller.disconnect(out_tick_attr, in_reset_attr)
og.Controller.connect(out_tick_attr, in_exec_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.E)
self.assertEqual(out_after_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.E)
# Check that when both the execIn and reset input attributes get triggered, the latter
# overrides the former and execution flow does not pass through outputs:after.
# FIXME: Something about the 2nd connection here is messing up the data model such
# that inputs:reset is being read as 0 inside the node
og.Controller.connect(out_tick_attr, in_reset_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# self.assertEqual(out_once_attr.get(), self.D)
# self.assertEqual(out_after_attr.get(), self.D)
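# A minimal pure-Python model (an assumption, not the node's source) of the Once
# semantics exercised above: the first execIn routes to outputs:once, subsequent
# ones route to outputs:after, and reset rearms the node.
class _OnceModel:
    """Hypothetical stand-in for omni.graph.action.Once's state behavior."""
    def __init__(self):
        self._fired = False
    def exec_in(self):
        if self._fired:
            return "after"
        self._fired = True
        return "once"
    def reset(self):
        self._fired = False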
# ----------------------------------------------------------------------
# NOTE: Even though the OnClosing node is threadsafe (its compute method is very simple),
# we don't adapt the below test to check for thread-safety conditions because it relies
# on other nodes (omni.graph.action.SendCustomEvent and omni.graph.nodes.GraphTarget)
# which are NOT threadsafe.
async def test_onclosing_node(self):
"""Test OnClosing node"""
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# Testing OnClosing is tricky because OG is being torn down when it fires, so we
# test it by sending a custom event when the network is triggered and then
# checking that we received that event.
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
name = "omni.graph.action." + event_name
return carb.events.type_from_string(name)
got_event = [0]
def on_event(_):
got_event[0] = got_event[0] + 1
reg_event_name = registered_event_name("foo")
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
sub = message_bus.create_subscription_to_push_by_type(reg_event_name, on_event)
self.assertIsNotNone(sub)
async def set_up_graph():
controller = og.Controller()
keys = og.Controller.Keys
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnClosing", "omni.graph.action.OnClosing"),
("Send", "omni.graph.action.SendCustomEvent"),
("GraphTarget", "omni.graph.nodes.GraphTarget"),
],
keys.CONNECT: [
("OnClosing.outputs:execOut", "Send.inputs:execIn"),
("GraphTarget.outputs:primPath", "Send.inputs:path"),
],
keys.SET_VALUES: [("Send.inputs:eventName", "foo")],
},
)
await set_up_graph()
# Evaluate once so that the standalone graph is in steady state.
await omni.kit.app.get_app().next_update_async()
self.assertEqual(got_event[0], 0)
# Close the stage.
usd_context = omni.usd.get_context()
(result, _) = await usd_context.close_stage_async()
self.assertTrue(result)
# Check our handler was called.
self.assertEqual(got_event[0], 1)
# Reset the counter.
got_event[0] = 0
# Now check that the same works with instanced graphs.
await usd_context.new_stage_async()
await set_up_graph()
og.cmds.SetEvaluationMode(
graph=og.get_graph_by_path(self.TEST_GRAPH_PATH),
new_evaluation_mode=og.GraphEvaluationMode.GRAPH_EVALUATION_MODE_INSTANCED,
)
stage = usd_context.get_stage()
prims = [stage.DefinePrim(f"/prim_{i}") for i in range(0, 100)]
for prim in prims:
OmniGraphSchemaTools.applyOmniGraphAPI(stage, prim.GetPath(), self.TEST_GRAPH_PATH)
# Wait an update for the graphs to get set up.
await omni.kit.app.get_app().next_update_async()
# Close the stage.
(result, _) = await usd_context.close_stage_async()
self.assertTrue(result)
# Check that our handler was called.
self.assertEqual(got_event[0], len(prims))
# ----------------------------------------------------------------------
async def test_oncustomevent_and_sendcustomevent_nodes(self):
"""Test OnCustomEvent and SendCustomEvent nodes"""
controller = og.Controller()
controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
(_, (_, _, event1_node, counter1_node, event2_node, counter2_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Send", "omni.graph.action.SendCustomEvent"),
("OnCustomEvent1", "omni.graph.action.OnCustomEvent"),
("Counter1", "omni.graph.action.Counter"),
("OnCustomEvent2", "omni.graph.action.OnCustomEvent"),
("Counter2", "omni.graph.action.Counter"),
],
self.keys.CONNECT: [
("OnImpulse.outputs:execOut", "Send.inputs:execIn"),
("OnCustomEvent1.outputs:execOut", "Counter1.inputs:execIn"),
("OnCustomEvent2.outputs:execOut", "Counter2.inputs:execIn"),
],
self.keys.SET_VALUES: [
("OnCustomEvent1.inputs:onlyPlayback", False),
("OnCustomEvent2.inputs:onlyPlayback", False),
("OnImpulse.inputs:onlyPlayback", False),
("Send.inputs:eventName", "foo"),
("Send.inputs:path", "Test Path"),
("OnCustomEvent1.inputs:eventName", "foo"),
("OnCustomEvent2.inputs:eventName", "foo"),
],
},
)
counter1_controller = og.Controller(og.Controller.attribute("outputs:count", counter1_node))
counter2_controller = og.Controller(og.Controller.attribute("outputs:count", counter2_node))
event1_controller = og.Controller(og.Controller.attribute("outputs:path", event1_node))
event2_controller = og.Controller(og.Controller.attribute("outputs:path", event2_node))
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 0)
# Trigger the graph once; this will queue up the event for the next evaluation.
controller.edit(self.TEST_GRAPH_PATH, {self.keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
# Note that if this is a push subscription, the receivers will run this frame instead of next.
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 0)
# This evaluation should trigger the receivers.
await omni.kit.app.get_app().next_update_async()
# Verify that events were received.
self.assertEqual(counter1_controller.get(), 1)
self.assertEqual(event1_controller.get(), "Test Path")
self.assertEqual(counter2_controller.get(), 1)
self.assertEqual(event2_controller.get(), "Test Path")
# Verify the contents of the associated bundle.
# FIXME: Authored bundle is always empty?
# bundle_contents = og.BundleContents(graph.get_default_graph_context(), event1_node, "outputs:bundle", True)
# self.assertEqual(1, bundle_contents.size)
# Modify the event name on the sender and one receiver and ensure it still works.
controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.SET_VALUES: [("Send.inputs:eventName", "bar"), ("OnImpulse.state:enableImpulse", True)],
},
)
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
# We changed the sender event name, so counter should NOT have triggered again.
self.assertEqual(counter1_controller.get(), 1)
# Change the receiver name to match.
controller.edit(self.TEST_GRAPH_PATH, {self.keys.SET_VALUES: ("OnCustomEvent1.inputs:eventName", "bar")})
await omni.kit.app.get_app().next_update_async()
# Trigger send again and verify we get it (1 frame lag for pop).
controller.edit(self.TEST_GRAPH_PATH, {self.keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 2)
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_evaluation_01.py
"""Action Graph Evaluation Tests, Part 1"""
import asyncio
import json
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.test
import omni.usd
# ======================================================================
class TestActionGraphEvaluation(ogts.OmniGraphTestCase):
"""Tests action graph evaluator functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ----------------------------------------------------------------------
async def test_active_latent(self):
"""Exercise a latent node that executes downstream nodes while latent"""
# +--------+ +----------+finished+-------------+
# | OnTick+-->| Countdown+-------->FinishCounter|
# +--------+ | | +-------------+
# | +-+
# +----------+ | +------------+ +------------+ +------------+
# +-----> TickCounter+----->TickCounter2+---->TickCounter3|
# tick +------------+ +------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Countdown", "omni.graph.action.Countdown"),
("FinishCounter", "omni.graph.action.Counter"),
("TickCounter", "omni.graph.action.Counter"),
("TickCounter2", "omni.graph.action.Counter"),
("TickCounter3", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Countdown.inputs:execIn"),
("Countdown.outputs:finished", "FinishCounter.inputs:execIn"),
("Countdown.outputs:tick", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickCounter2.inputs:execIn"),
("TickCounter2.outputs:execOut", "TickCounter3.inputs:execIn"),
],
keys.SET_VALUES: [("Countdown.inputs:duration", 3), ("OnTick.inputs:onlyPlayback", False)],
},
)
(_, _, finish_counter, tick_counter, _, tick_counter_3) = nodes
finish_counter_controller = og.Controller(og.Controller.attribute("outputs:count", finish_counter))
tick_counter_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter))
tick_counter_3_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter_3))
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
await controller.evaluate(graph)
self.assertEqual(tick_counter_controller.get(), 1)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_controller.get(), 2)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
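# A small pure-Python model (an assumption, not the node's source) of the latent
# Countdown behavior traced above: the first evaluation enters the latent state,
# each of the next 'duration' evaluations fires outputs:tick, and the evaluation
# after that leaves the latent state by firing outputs:finished.
def _countdown_model(self, duration):
    yield "latent"
    for _ in range(duration):
        yield "tick"
    yield "finished"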
# ----------------------------------------------------------------------
async def test_async_nodes(self):
"""Test asynchronous action nodes"""
# Check that a nested loop state is maintained when executing a latent delay.
#
# +---------+ +----------+ +----------+ +-------+ +--------+
# | IMPULSE +-->| FOR-LOOP +--->| FOR-LOOP +--->| DELAY +--->| COUNTER|
# +---------+ +----------+ +----------+ +-------+ +--------+
#
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, _, counter_node, _, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Delay", "omni.graph.action.Delay"),
("Counter", "omni.graph.action.Counter"),
("OnTick", "omni.graph.action.OnTick"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Delay.inputs:execIn"),
("Delay.outputs:finished", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("OnImpulse.inputs:onlyPlayback", False),
("Delay.inputs:duration", 0.1),
("ForLoop1.inputs:stop", 2),
("ForLoop2.inputs:stop", 5),
],
},
)
await controller.evaluate(graph)
# Trigger graph once.
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# In delay now, no count.
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
self.assertEqual(counter_controller.get(), 0)
# Wait to ensure the first 5 delays compute.
for _ in range(5):
await asyncio.sleep(0.2)
await controller.evaluate(graph)
count_val = counter_controller.get()
self.assertGreater(count_val, 4)
# Wait and verify the remainder go through.
for _ in range(5):
await asyncio.sleep(0.1)
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 10)
# ----------------------------------------------------------------------
async def test_chained_stateful_nodes(self):
"""Test that chaining loop nodes works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("ForLoop1.inputs:stop", 5),
("ForLoop2.inputs:stop", 5),
],
},
)
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_node)), 5 * 5)
# ----------------------------------------------------------------------
async def test_cycle_break(self):
"""Test that an illegal cycle issues a warning"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (on_impulse, count_a, count_b), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("A", "omni.graph.action.Counter"),
("B", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "A.inputs:execIn"),
("A.outputs:execOut", "B.inputs:execIn"),
("B.outputs:execOut", "A.inputs:execIn"),
],
keys.SET_VALUES: [
("OnImpulse.state:enableImpulse", True),
("OnImpulse.inputs:onlyPlayback", False),
],
},
)
with ogts.ExpectedError():
await controller.evaluate(graph)
og.Controller.set(controller.attribute("state:enableImpulse", on_impulse), True)
with ogts.ExpectedError():
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", count_a)), 2)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", count_b)), 2)
# ----------------------------------------------------------------------
async def test_dep_sort_fan_out(self):
"""Test that dependency sort works when there is data fan-out"""
# +-------------+
# +-------->| |
# | | SwitchTokenA|
# | +--->+-------------+
# +----------+ |
# |OnImpulse +----|------+ +--------------+
# +----------+ | +---------->| SwitchTokenB |
# | +^-------------+
# +------+-+ +--------+ |
# | ConstA +--->AppendB +---+
# +--------+ +--------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, _, _), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ConstA", "omni.graph.nodes.ConstantToken"),
("AppendB", "omni.graph.nodes.AppendString"),
("SwitchTokenA", "omni.graph.action.SwitchToken"),
("SwitchTokenB", "omni.graph.action.SwitchToken"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "SwitchTokenA.inputs:execIn"),
("OnImpulse.outputs:execOut", "SwitchTokenB.inputs:execIn"),
("ConstA.inputs:value", "SwitchTokenA.inputs:value"),
("ConstA.inputs:value", "AppendB.inputs:value"),
("AppendB.outputs:value", "SwitchTokenB.inputs:value"),
],
keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
("AppendB.inputs:suffix", {"value": "Foo", "type": "token"}),
],
},
)
await controller.evaluate(graph)
graph_state = og.OmniGraphInspector().as_json(graph, flags=["evaluation"])
graph_state_obj = json.loads(graph_state)
trace = graph_state_obj["Evaluator"]["Instances"][0]["LastNonEmptyEvaluation"]["Trace"]
# The switches can run in any order
self.assertTrue(
trace in (["SwitchTokenA", "AppendB", "SwitchTokenB"], ["AppendB", "SwitchTokenB", "SwitchTokenA"])
)
# ----------------------------------------------------------------------
async def test_diamond_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a downstream node"""
# +--------++ +----------+
# +--> TickA +--->|FinishedA |---+
# | +---------+ +----------+ |
# +---------+ +-----------+ | | +------------+
# |OnImpulse+-->|TickCounter+-+ +-->|MergeCounter|
# +---------+ +-----------+ | | +------------+
# | +---------+ +----------+ |
# +-->| TickB +--->|FinishedB |--+
# +--------++ +----------+
# | +---------+
# +-->| TickC |
# +--------++
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickCounter", "omni.graph.action.Counter"),
("TickA", "omni.graph.action.Countdown"),
("TickB", "omni.graph.action.Countdown"),
("TickC", "omni.graph.action.Countdown"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("MergeCounter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickA.inputs:execIn"),
("TickCounter.outputs:execOut", "TickB.inputs:execIn"),
("TickCounter.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
("FinishCounterA.outputs:execOut", "MergeCounter.inputs:execIn"),
("FinishCounterB.outputs:execOut", "MergeCounter.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_counter, _, _, tick_c, finish_counter_a, finish_counter_b, merge_counter) = nodes
def check_counts(t_c, f_a, f_b, m_c, tick_c_count):
for node, expected in (
(tick_counter, t_c),
(finish_counter_a, f_a),
(finish_counter_b, f_b),
(merge_counter, m_c),
):
count = og.Controller.get(controller.attribute("outputs:count", node))
self.assertEqual(count, expected, node.get_prim_path())
self.assertEqual(tick_c.get_compute_count(), tick_c_count)
self.assertEqual(tick_c.get_compute_count(), 0)
# Set up latent tickers.
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 1)
# Latent ticks.
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 2)
# Both branches complete.
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# No count changes + no additional computes of tickC.
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# ----------------------------------------------------------------------
async def test_diamond_latent_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a latent downstream node"""
# +--------++
# +--> TickA +--+
# | +---------+ |
# +---------+ | | +-------+ +-------+
# |OnImpulse+-->+ +-->|TickD +-+--->|CountF |
# +---------+ | | +-------+ | +-------+
# | +--------+ | +--->+-------+
# +-->| TickB +--+ |TickE |
# | +--------+ +--->+-------+
# | +--------+ |
# +-->| TickC +----------------+
# +--------+
# Note that when TickA triggers TickD into its latent state, TickB subsequently hits TickD. This
# subsequent evaluation is _transient_, meaning that TickB will not block on a new copy of TickD,
# because there is only one TickD and therefore only one state (latent or not).
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.Countdown"),
("TickB", "omni.graph.action.Countdown"),
("TickC", "omni.graph.action.Countdown"),
("TickD", "omni.graph.action.Countdown"),
("TickE", "omni.graph.action.Countdown"),
("CountF", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickB.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "TickD.inputs:execIn"),
("TickB.outputs:finished", "TickD.inputs:execIn"),
("TickC.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "CountF.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 2),
("TickD.inputs:duration", 1),
("TickE.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_a, tick_b, tick_c, tick_d, tick_e, count_f) = nodes
def check_counts(i, t_a, t_b, t_c, t_d, t_e):
for node, expected in ((tick_a, t_a), (tick_b, t_b), (tick_c, t_c), (tick_d, t_d), (tick_e, t_e)):
self.assertEqual(node.get_compute_count(), expected, f"Check {i} for {node.get_prim_path()}")
# A, B, C, D, E
compute_counts = [
(1, 1, 1, 0, 0), # 0. fan out to trigger A, B, C into latent state
(2, 2, 2, 0, 0), # 1. A, B, C tick
(3, 3, 3, 2, 0), # 2. A, B end latent, D into latent via A or B, D ticks via A or B, C ticks
(3, 3, 4, 3, 2), # 3.
(3, 3, 4, 3, 3), # 4.
(3, 3, 4, 3, 3), # 5.
(3, 3, 4, 3, 3), # 6.
]
for i, c_c in enumerate(compute_counts):
await controller.evaluate(graph)
check_counts(i, *c_c)
# Verify that CountF has computed 1x due to the fan-in at TickD NOT acting like separate threads.
self.assertEqual(count_f.get_compute_count(), 1)
# ----------------------------------------------------------------------
async def test_dynamic_exec_pins(self):
"""Test that adding execution pins to a non-action node works"""
controller = og.Controller()
keys = og.Controller.Keys
(_, (on_tick, to_string), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("ToString", "omni.graph.nodes.ToString"),
],
keys.SET_VALUES: [
("ToString.inputs:value", 42, "double"),
("OnTick.inputs:onlyPlayback", False),
],
},
)
# Verify to_string has not been computed.
await controller.evaluate()
self.assertEqual(0, to_string.get_compute_count())
self.assertEqual(1, on_tick.get_compute_count())
# Add execution attribs and verify it still doesn't get computed.
attrib = og.Controller.create_attribute(
to_string,
"inputs:execIn",
"execution",
og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT,
)
self.assertIsNotNone(attrib)
await controller.evaluate()
self.assertEqual(0, to_string.get_compute_count())
self.assertEqual(2, on_tick.get_compute_count())
# Hook up to OnTick and verify it is now computing.
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CONNECT: [
("OnTick.outputs:tick", "ToString.inputs:execIn"),
]
},
)
for i in range(10):
await controller.evaluate()
self.assertEqual(i + 1, to_string.get_compute_count())
self.assertEqual(i + 3, on_tick.get_compute_count())
# ----------------------------------------------------------------------
async def test_exec_fan_out(self):
"""Test that fanning out from an exec port works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FF1", "omni.graph.action.FlipFlop"),
("FF2", "omni.graph.action.FlipFlop"),
("FF11", "omni.graph.action.FlipFlop"),
("FF12", "omni.graph.action.FlipFlop"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
keys.CONNECT: [
("OnTick.outputs:tick", "FF1.inputs:execIn"),
("OnTick.outputs:tick", "FF2.inputs:execIn"),
("FF1.outputs:a", "FF11.inputs:execIn"),
("FF1.outputs:a", "FF12.inputs:execIn"),
],
},
)
# 1. OnTick triggers FF1 which triggers FF11 and FF12, then FF2.
# 2. OnTick triggers FF1 and FF2.
# 3. OnTick triggers FF1 which triggers FF11 and FF12, then FF2.
await controller.evaluate(graph)
flip_flops = nodes[1:]
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [False, False, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, False, False])
# ----------------------------------------------------------------------
async def test_exec_fan_out_shared_deps(self):
"""Test that dependency sort works when there is shared data in exec fan-out"""
# +---------+
# +---------->| Write1 |
# | +----^----+
# | |
# | +----------+
# | |
# +-----------+ | |
# | OnImpulse +-----+-----+----> +---------+
# +-----------+ | | | Write2 |
# | +----->+---------+
# | |
# | | +---------+
# +-----+----->| Write3 |
# | +---------+
# | ^
# +-------+ +---+----+---+
# | Const +----->| Inc |
# +-------+ +--------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, _, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Const", "omni.graph.nodes.ConstantDouble"),
("Inc", "omni.graph.nodes.Increment"),
("Write1", "omni.graph.nodes.WritePrimAttribute"),
("Write2", "omni.graph.nodes.WritePrimAttribute"),
("Write3", "omni.graph.nodes.WritePrimAttribute"),
],
keys.CREATE_PRIMS: [
("/World/TestPrim1", {"val": ("double", 1.0)}),
("/World/TestPrim2", {"val": ("double", 2.0)}),
("/World/TestPrim3", {"val": ("double", 3.0)}),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "Write1.inputs:execIn"),
("OnImpulse.outputs:execOut", "Write2.inputs:execIn"),
("OnImpulse.outputs:execOut", "Write3.inputs:execIn"),
("Const.inputs:value", "Inc.inputs:value"),
("Inc.outputs:result", "Write1.inputs:value"),
("Inc.outputs:result", "Write2.inputs:value"),
("Inc.outputs:result", "Write3.inputs:value"),
],
keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
("Const.inputs:value", 41.0),
("Inc.inputs:increment", 1.0),
("Write1.inputs:primPath", "/World/TestPrim1"),
("Write1.inputs:usePath", True),
("Write1.inputs:name", "val"),
("Write2.inputs:primPath", "/World/TestPrim2"),
("Write2.inputs:usePath", True),
("Write2.inputs:name", "val"),
("Write3.inputs:primPath", "/World/TestPrim3"),
("Write3.inputs:usePath", True),
("Write3.inputs:name", "val"),
],
},
)
await controller.evaluate(graph)
stage = omni.usd.get_context().get_stage()
for i in (1, 2, 3):
self.assertEqual(stage.GetAttributeAtPath(f"/World/TestPrim{i}.val").Get(), 42.0)
# ----------------------------------------------------------------------
async def test_exec_sort_failure(self):
"""Test that sorting dependencies with non-trivial authored graph"""
# Our global sort excludes exec nodes, so a global topo (Kahn) sort will fail such that Inc3 doesn't get
# computed until after Add2, so instead we sort each dep network independently. This test verifies the case
# where that matters.
#
# +-----------------------------> Write1(var) +----------------------------------------+
# | ^ | |
# | | | v
# OnTick --------------------+ | +-----------Inc------------+ Write2(var2)
# | | | ^
# v | | |
# Read1(var)------------> Add1 --Inc2--+ v |
# Inc3 --------------> Add2 ---------------+
controller = og.Controller()
keys = og.Controller.Keys
(_, (on_tick, a_1, a_2, _, _, _, _, _, _), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("A1", "omni.graph.nodes.Add"),
("A2", "omni.graph.nodes.Add"),
("Write1", "omni.graph.core.WriteVariable"),
("Write2", "omni.graph.core.WriteVariable"),
("Read1", "omni.graph.core.ReadVariable"),
("Inc", "omni.graph.nodes.Increment"),
("Inc2", "omni.graph.nodes.Increment"),
("Inc3", "omni.graph.nodes.Increment"),
],
keys.CREATE_VARIABLES: [
("var", og.Type(og.BaseDataType.DOUBLE)),
("var2", og.Type(og.BaseDataType.DOUBLE)),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Inc3.inputs:value", {"type": "double", "value": 42.0}),
("Write1.inputs:variableName", "var"),
("Write2.inputs:variableName", "var2"),
("Read1.inputs:variableName", "var"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Write1.inputs:execIn"),
("OnTick.outputs:timeSinceStart", "A1.inputs:a"),
("Read1.outputs:value", "A1.inputs:b"),
("A1.outputs:sum", "Inc2.inputs:value"),
("Inc2.outputs:result", "Write1.inputs:value"),
("Write1.outputs:execOut", "Write2.inputs:execIn"),
("Write1.outputs:value", "Inc.inputs:value"),
("Inc.outputs:result", "A2.inputs:a"),
("Inc3.outputs:result", "A2.inputs:b"),
("A2.outputs:sum", "Write2.inputs:value"),
],
},
)
await omni.kit.app.get_app().next_update_async()
a_1_v = og.Controller.get(controller.attribute("outputs:sum", a_1))
a_2_v = og.Controller.get(controller.attribute("outputs:sum", a_2))
on_tick_dt = og.Controller.get(controller.attribute("outputs:timeSinceStart", on_tick))
a_1_expected = 0 + on_tick_dt
a_2_expected = (a_1_expected + 1.0 + 1.0) + (42.0 + 1.0)
self.assertAlmostEqual(a_1_v, a_1_expected, places=3)
self.assertAlmostEqual(a_2_v, a_2_expected, places=3)
# ----------------------------------------------------------------------
async def test_fan_in(self):
"""Test that fan-in of execution connections works as expected (from a loaded test .usda file)"""
(result, error) = await ogts.load_test_file("TestActionFanIn.usda", use_caller_subdirectory=True)
self.assertTrue(result, error)
graph_path = "/World/ActionGraph"
controller = og.Controller()
graph = controller.graph(graph_path)
# Trigger the loop.
og.Controller.set(controller.attribute(f"{graph_path}/on_impulse_event.state:enableImpulse"), True)
await controller.evaluate(graph)
import json  # stdlib; parses the inspector output below
graph_state = og.OmniGraphInspector().as_json(controller.graph(graph_path), flags=["evaluation"])
graph_state_obj = json.loads(graph_state)
trace = graph_state_obj["Evaluator"]["Instances"][0]["LastNonEmptyEvaluation"]["Trace"]
# Verify the first loop iteration.
self.assertEqual("for_loop", trace[0])
# These nodes can compute in any order
self.assertEqual(["counter", "counter_01"], sorted(trace[1:3]))
expected_trace = [
"to_uint64",
"sync_gate",
"to_uint64",
"sync_gate",
]
self.assertListEqual(expected_trace, trace[3:7])
trace[0:7] = []
# Verify downstream from sync gate.
expected_trace = [
"counter_02",
]
self.assertListEqual(expected_trace, trace[0:1])
trace[0 : len(expected_trace)] = []
# Verify second iteration.
self.assertEqual("for_loop", trace[0])
# These nodes can compute in any order
self.assertEqual(["counter", "counter_01"], sorted(trace[1:3]))
expected_trace = [
"to_uint64",
"sync_gate",
"to_uint64",
"sync_gate",
]
self.assertListEqual(expected_trace, trace[3:7])
# ----------------------------------------------------------------------
async def test_loading_type_resol(self):
"""Test that loading a file with weird type resolution pattern works"""
(result, error) = await ogts.load_test_file("load_with_type_resol.usda", use_caller_subdirectory=True)
self.assertTrue(result, error)
graph_path = "/World/ActionGraph"
controller = og.Controller()
graph = controller.graph(graph_path)
# Evaluate the graph.
await controller.evaluate(graph)
# Check the result.
var = graph.find_variable("Result")
val = var.get_array(graph.get_default_graph_context(), False, 0)
self.assertTrue((val == [(-50, -50, -50), (50, 50, 50)]).all())
# ----------------------------------------------------------------------
async def test_fan_in_exec(self):
"""Test that execution fan-in is handled correctly"""
# The evaluator has to consider the case where gate.enter has contradicting upstream values.
# Gate needs to know which input is active: it needs the value of enter to be ENABLED when it
# is triggered by OnTick, even though OnTickDisabled has set its output to the same attribute as DISABLED.
#
# +--------------+
# |OnImpulseEvent+---+ +-----------+
# +--------------+ | |Gate |
# +--->toggle |
# +--------------+ | |
# |OnTick +------>|enter |
# +--------------+ +^-----exit-+
# |
# +--------------+ |
# |OnTickDisabled+--------+
# +--------------+
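# (Gate semantics, for reference: inputs:toggle flips the gate between open and closed, and
# outputs:exit fires only when an ENABLED pulse arrives on inputs:enter while the gate is open.)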
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, gate, on_impulse_event), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("OnTickDisabled", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Gate.inputs:enter"),
("OnTickDisabled.outputs:tick", "Gate.inputs:enter"),
("OnImpulseEvent.outputs:execOut", "Gate.inputs:toggle"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", True),
("OnTick.inputs:onlyPlayback", False),
("OnImpulseEvent.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has not triggered.
self.assertFalse(gate_exit.get())
await controller.evaluate(graph)
self.assertFalse(gate_exit.get())
# Toggle the gate and verify that the tick goes through. On the first evaluation it is not known
# whether the Gate will trigger, because the order in which entry points are executed is not defined... FIXME.
controller.attribute("state:enableImpulse", on_impulse_event).set(True)
await controller.evaluate(graph)
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_fan_out_exec(self):
"""Test that execution fan-out is handled correctly"""
# We want to reset the execution attribute states before the node compute() to avoid bugs
# that arise when authors forget to fully specify the output states. However we can't
# do this in the middle of traversal, because fan-out from a connection requires that the state
# be preserved for every downstream node which may read from it (like Gate).
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, gate, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Gate.inputs:enter"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", False),
("OnTick.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has triggered.
self.assertTrue(gate_exit.get())
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_latent_and_push(self):
"""Exercise latent nodes in combination with stateful loop node"""
#
# +---------+ +-------+ tick +--------+ loopBody +-------+ +------------+
# |OnImpulse+-->|TickA +----------->ForLoop1++--------->|TickB +-+->|TickCounterB|
# +---------+ +----+--+ +--------++ +-------+ | +------------+
# | finish | |
# | | |
# | +--------------+ +v----------------+ +-v------------+
# +----->|FinishCounterA| |FinishLoopCounter| |FinishCounterB|
# +--------------+ +-----------------+ +--------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.Countdown"),
("ForLoop1", "omni.graph.action.ForLoop"),
("TickB", "omni.graph.action.Countdown"),
("FinishLoopCounter", "omni.graph.action.Counter"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "ForLoop1.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("ForLoop1.outputs:loopBody", "TickB.inputs:execIn"),
("ForLoop1.outputs:finished", "FinishLoopCounter.inputs:execIn"),
("TickB.outputs:tick", "TickCounterB.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
],
keys.SET_VALUES: [
("ForLoop1.inputs:start", 0),
("ForLoop1.inputs:stop", 3),
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("OnImpulse.state:enableImpulse", True),
("OnImpulse.inputs:onlyPlayback", False),
],
},
)
(_, _, _, _, finish_loop_counter, finish_counter_a, finish_counter_b, tick_counter_b) = nodes
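# Expected counts: TickA (duration 2) ticks twice and finishes once, so ForLoop1 runs twice and
# its finished pin fires twice. Each loop pass makes 3 iterations, triggering TickB 2 * 3 = 6
# times; TickB (duration 2) ticks twice per trigger (12 ticks) and finishes once per trigger (6).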
for _ in range(20):
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 12)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_b)), 6)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_loop_counter)), 2)
# ----------------------------------------------------------------------
async def test_latent_chain(self):
"""Exercise a chain of latent nodes"""
# +---------+ +-------+ tick +-------+ tick +-------+
# |OnImpulse+-->TickA +-------->TickB +-------->|LatentC|
# +---------+ +-----+-+ +------++ +-------+-----+
# | finish | finish |
# finish | +-------------+ | +-------------+ +-v----------+
# +->TickCounterA | +-->| TickCounterB| |TickCounterC|
# +-------------+ +-------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.Countdown"),
("TickB", "omni.graph.action.Countdown"),
("LatentC", "omni.graph.action.Countdown"),
("TickCounterA", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
("TickCounterC", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "TickB.inputs:execIn"),
("TickA.outputs:finished", "TickCounterA.inputs:execIn"),
("TickB.outputs:tick", "LatentC.inputs:execIn"),
("TickB.outputs:finished", "TickCounterB.inputs:execIn"),
("LatentC.outputs:finished", "TickCounterC.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("LatentC.inputs:duration", 2),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, _, _, _, tick_counter_a, tick_counter_b, tick_counter_c) = nodes
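# Expected counts: TickA finishes once; each of TickA's 2 ticks triggers TickB, which finishes
# once per trigger (2); TickB ticks twice per trigger, so LatentC is triggered 2 * 2 = 4 times
# and finishes once per trigger (4).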
for _ in range(16):
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 2)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_c)), 4)
| 44,222 |
Python
| 47.225736 | 116 | 0.453349 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_on_stage_event_node.py
|
"""Basic tests of the OnStageEvent node"""
import omni.client
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.graph.tools.ogn as ogn
import omni.kit.app
import omni.kit.commands
import omni.kit.test
import omni.timeline
import omni.usd
from pxr import Sdf
# ======================================================================
class TestOnStageEventNode(ogts.OmniGraphTestCase):
"""Tests OnStageEvent node functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
async def test_backward_compatibility_v3(self):
"""Validate backward compatibility for legacy versions of OnStageEvent node."""
# load the test scene which contains a OnStageEvent V2 node
(result, error) = await ogts.load_test_file("TestOnStageEventNode_v2.usda", use_caller_subdirectory=True)
self.assertTrue(result, error)
action_graph_path = "/World/ActionGraph"
action_graph = og.get_graph_by_path(action_graph_path)
on_stage_event_node = action_graph.get_node(action_graph_path + "/on_stage_event")
self.assertTrue(on_stage_event_node.is_valid())
# The "Hierarchy Changed" event has been introduced since V3. Validate that it is
# automatically included by the list of allowed tokens after loading V2.
attr = on_stage_event_node.get_attribute("inputs:eventName")
allowed_tokens = attr.get_metadata(ogn.MetadataKeys.ALLOWED_TOKENS)
self.assertTrue(isinstance(allowed_tokens, str))
self.assertTrue("Hierarchy Changed" in allowed_tokens.split(","))
async def test_stage_events(self):
"""Test OnStageEvent"""
controller = og.Controller()
keys = og.Controller.Keys
(_, (on_stage_node, _, _, counter_sel_node, counter_stop_node, counter_start_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnStageEvent", "omni.graph.action.OnStageEvent"),
("OnStageEvent2", "omni.graph.action.OnStageEvent"),
("OnStageEvent3", "omni.graph.action.OnStageEvent"),
("Counter", "omni.graph.action.Counter"),
("Counter2", "omni.graph.action.Counter"),
("Counter3", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnStageEvent.outputs:execOut", "Counter.inputs:execIn"),
("OnStageEvent2.outputs:execOut", "Counter2.inputs:execIn"),
("OnStageEvent3.outputs:execOut", "Counter3.inputs:execIn"),
],
keys.SET_VALUES: [
("OnStageEvent.inputs:eventName", "Selection Changed"),
("OnStageEvent.inputs:onlyPlayback", False),
("OnStageEvent2.inputs:eventName", "Animation Stop Play"),
("OnStageEvent2.inputs:onlyPlayback", True),
("OnStageEvent3.inputs:eventName", "Animation Start Play"),
("OnStageEvent3.inputs:onlyPlayback", True),
],
},
)
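# Counter tracks "Selection Changed", Counter2 tracks "Animation Stop Play" and Counter3
# tracks "Animation Start Play" (see the tuple unpacking above).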
async def wait_2():
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
def get_start_count():
return og.Controller.get(controller.attribute("outputs:count", counter_start_node))
def get_stop_count():
return og.Controller.get(controller.attribute("outputs:count", counter_stop_node))
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 0)
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 0)
selection = omni.usd.get_context().get_selection()
selection.set_selected_prim_paths([self.TEST_GRAPH_PATH + "/OnStageEvent"], False)
# 1 frame delay on the pop, 1 frame delay on the compute
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 1)
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 0)
# change the tracked event, verify selection doesn't fire
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Saved")
selection.set_selected_prim_paths([self.TEST_GRAPH_PATH + "/OnStageEvent2"], False)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 1)
await omni.kit.app.get_app().next_update_async()
# change it back, verify it does fire when selection changes again
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Selection Changed")
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 1)
selection.set_selected_prim_paths([self.TEST_GRAPH_PATH + "/OnStageEvent"], False)
await wait_2()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 2)
# Verify that start/stop events work when only-playback is true
timeline = omni.timeline.get_timeline_interface()
timeline.set_start_time(1.0)
timeline.set_end_time(10.0)
timeline.set_target_framerate(timeline.get_time_codes_per_seconds())
timeline.play()
await wait_2()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
# Check that pausing / resuming does not trigger
timeline.pause()
await wait_2()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
timeline.play()
await wait_2()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
timeline.stop()
await wait_2()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 1)
await controller.evaluate()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 1)
# Verify that stopping while paused triggers the event
timeline.play()
await wait_2()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 2)
timeline.pause()
await wait_2()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 2)
timeline.stop()
await wait_2()
self.assertEqual(get_stop_count(), 2)
self.assertEqual(get_start_count(), 2)
# ----------------------------------------------------------------------
async def test_stage_hierarchy_changed_event(self):
"""Test the Hierarchy Changed event"""
app = omni.kit.app.get_app()
controller = og.Controller()
keys = og.Controller.Keys
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
root_path = Sdf.Path.absoluteRootPath
# Create Xform
omni.kit.commands.execute("CreatePrim", prim_type="Xform")
xform_path = root_path.AppendChild("Xform")
xform = stage.GetPrimAtPath(xform_path)
self.assertTrue(xform)
# Create Material
omni.kit.commands.execute("CreatePrim", prim_type="Material")
material_path = root_path.AppendChild("Material")
material = stage.GetPrimAtPath(material_path)
self.assertTrue(material)
# Create action graph
(_, (on_stage_node, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnStageEvent", "omni.graph.action.OnStageEvent"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnStageEvent.outputs:execOut", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("OnStageEvent.inputs:eventName", "Hierarchy Changed"),
("OnStageEvent.inputs:onlyPlayback", False),
],
},
)
outputs_count_attr = controller.attribute("outputs:count", counter_node)
expected_hierarchy_changed_event_count = 0
await app.next_update_async()
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Create cube
omni.kit.commands.execute("CreatePrim", prim_type="Cube")
cube_path = root_path.AppendChild("Cube")
cube = stage.GetPrimAtPath(cube_path)
self.assertTrue(cube)
# 1 frame delay on the pop, 1 frame delay on the compute
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Reparent cube
cube_path_reparented = xform_path.AppendChild("Cube")
omni.kit.commands.execute("MovePrim", path_from=cube_path, path_to=cube_path_reparented)
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Rename cube to lowercase
cube_path_lowercase = xform_path.AppendChild("cube")
omni.kit.commands.execute("MovePrim", path_from=cube_path_reparented, path_to=cube_path_lowercase)
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Modify size attribute.
cube = stage.GetPrimAtPath(cube_path_lowercase)
self.assertTrue(cube)
cube.GetAttribute("size").Set(1.0)
await app.next_update_async()
await app.next_update_async()
# The "Hierarchy Changed" event is not expected for attribute change.
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Modify material binding.
rel = cube.CreateRelationship("material:binding", False)
rel.SetTargets([material_path])
await app.next_update_async()
await app.next_update_async()
# The "Hierarchy Changed" event is not expected for relationship change.
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Change the tracked event
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Saved")
await og.Controller.evaluate()
omni.kit.commands.execute("MovePrim", path_from=cube_path_lowercase, path_to=cube_path)
await app.next_update_async()
await app.next_update_async()
# verify hierarchy changed event doesn't fire
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Change it back, verify it does fire when hierarchy changes again
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Hierarchy Changed")
await og.Controller.evaluate()
# Remove cube
omni.kit.commands.execute("DeletePrims", paths=[cube_path])
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
| 12,775 |
Python
| 42.016835 | 117 | 0.599687 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_evaluation.py
|
"""Action Graph Evaluation Tests"""
import asyncio
import carb.events
import omni.client
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.app
import omni.kit.test
import omni.usd
# ======================================================================
class TestActionGraphEvaluation(ogts.OmniGraphTestCase):
"""Tests action graph evaluator functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ----------------------------------------------------------------------
async def test_exec_fan_out(self):
"""Test that fanning out from an exec port works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FF1", "omni.graph.action.FlipFlop"),
("FF2", "omni.graph.action.FlipFlop"),
("FF11", "omni.graph.action.FlipFlop"),
("FF12", "omni.graph.action.FlipFlop"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
keys.CONNECT: [
("OnTick.outputs:tick", "FF1.inputs:execIn"),
("OnTick.outputs:tick", "FF2.inputs:execIn"),
("FF1.outputs:a", "FF11.inputs:execIn"),
("FF1.outputs:a", "FF12.inputs:execIn"),
],
},
)
# 1. OnTick triggers FF1 which triggers FF11 and FF12, then FF2
# 2. OnTick triggers FF1 and FF2
# 3. OnTick triggers FF1 which triggers FF11 and FF12, then FF2
await controller.evaluate(graph)
flip_flops = nodes[1:]
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [False, False, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, False, False])
# ----------------------------------------------------------------------
async def test_chained_stateful_nodes(self):
"""Test that chaining loop nodes works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("ForLoop1.inputs:stop", 5),
("ForLoop2.inputs:stop", 5),
],
},
)
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_node)), 5 * 5)
# ----------------------------------------------------------------------
async def test_async_nodes(self):
"""Test asynchronous action nodes"""
# Check that a nested loop state is maintained when executing a latent delay
#
# +---------+ +----------+ +----------+ +-------+ +--------+
# | IMPULSE +-->| FOR-LOOP +--->| FOR-LOOP +--->| DELAY +--->| COUNTER|
# +---------+ +----------+ +----------+ +-------+ +--------+
#
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, _, counter_node, _, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Delay", "omni.graph.action.Delay"),
("Counter", "omni.graph.action.Counter"),
("OnTick", "omni.graph.action.OnTick"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Delay.inputs:execIn"),
("Delay.outputs:finished", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("OnImpulse.inputs:onlyPlayback", False),
("Delay.inputs:duration", 0.1),
("ForLoop1.inputs:stop", 2),
("ForLoop2.inputs:stop", 5),
],
},
)
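# The nested loops queue 2 * 5 = 10 Delay activations in total; each Delay holds its
# position in the nested loop state while latent, which is what this test exercises.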
await controller.evaluate(graph)
# trigger graph once
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# in delay now, no count
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
self.assertEqual(counter_controller.get(), 0)
# wait to ensure the first 5 delays compute
for _ in range(5):
await asyncio.sleep(0.2)
await controller.evaluate(graph)
count_val = counter_controller.get()
self.assertGreater(count_val, 4)
# wait and verify the remainder go through
for _ in range(5):
await asyncio.sleep(0.1)
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 10)
# ----------------------------------------------------------------------
async def test_stateful_flowcontrol_evaluation(self):
"""Test that stateful flow control nodes are fully evaluated"""
# b
# +----------+ +---------+
# +--->| Sequence +-->|Counter1 |
# | +----------+ +---------+
# +-----------+ |
# | OnImpulse +-+
# +-----------+ |
# | +----------+ +----------+
# +--->| ForLoop1 +-->| Counter2 |
# +----------+ +----------+
# finished
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, counter1_node, _, counter2_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Sequence", "omni.graph.action.Sequence"),
("Counter1", "omni.graph.action.Counter"),
("ForLoop1", "omni.graph.action.ForLoop"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "Sequence.inputs:execIn"),
("Sequence.outputs:b", "Counter1.inputs:execIn"),
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:finished", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [("OnImpulse.inputs:onlyPlayback", False), ("ForLoop1.inputs:stop", 10)],
},
)
await controller.evaluate(graph)
# trigger graph once
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# verify that counter was called in spite of sequence 'a' being disconnected
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter1_node)), 1)
# verify that the counter was called in spite of there being no loopBody connection - the execution
# evaluator still has to trigger the loop 11 times (10 iterations plus finished) despite there being
# no downstream connection
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 1)
# ----------------------------------------------------------------------
async def test_request_driven_node(self):
"""Test that RequestDriven nodes are computed as expected"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Counter", "omni.graph.action.Counter"),
],
keys.SET_VALUES: [("OnImpulse.inputs:onlyPlayback", False)],
keys.CONNECT: ("OnImpulse.outputs:execOut", "Counter.inputs:execIn"),
},
)
# After several updates, there should have been no compute calls
await controller.evaluate(graph)
await controller.evaluate(graph)
await controller.evaluate(graph)
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
self.assertEqual(counter_controller.get(), 0)
# change OnImpulse state attrib. The node should now request compute
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 1)
# more updates should not result in more computes
await controller.evaluate(graph)
await controller.evaluate(graph)
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 1)
# ----------------------------------------------------------------------
async def test_fan_in_exec(self):
"""Test that execution fan-in is handled correctly."""
# The evaluator has to consider the case where gate.enter has contradicting upstream values.
# Gate needs to know which input is active: it needs the value of enter to be ENABLED when it
# is triggered by OnTick, even though OnTickDisabled has set its output to the same attribute as DISABLED.
#
# +--------------+
# |OnImpulseEvent+---+ +-----------+
# +--------------+ | |Gate |
# +--->toggle |
# +--------------+ | |
# |OnTick +------>|enter |
# +--------------+ +^-----exit-+
# |
# +--------------+ |
# |OnTickDisabled+--------+
# +--------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, gate, on_impulse_event), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("OnTickDisabled", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Gate.inputs:enter"),
("OnTickDisabled.outputs:tick", "Gate.inputs:enter"),
("OnImpulseEvent.outputs:execOut", "Gate.inputs:toggle"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", True),
("OnTick.inputs:onlyPlayback", False),
("OnImpulseEvent.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has not triggered
self.assertFalse(gate_exit.get())
await controller.evaluate(graph)
self.assertFalse(gate_exit.get())
# Toggle the gate and verify that the tick goes through. On the first evaluation it is not known
# whether the Gate will trigger, because the order in which entry points are executed is not defined... FIXME
controller.attribute("state:enableImpulse", on_impulse_event).set(True)
await controller.evaluate(graph)
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_fan_out_exec(self):
"""Test that execution fan-out is handled correctly."""
# We want to reset the execution attribute states before the node compute() to avoid bugs
# that arise when authors forget to fully specify the output states. However we can't
# do this in the middle of traversal, because fan-out from a connection requires that the state
# be preserved for every downstream node which may read from it (like Gate).
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, gate, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Gate.inputs:enter"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", False),
("OnTick.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has triggered
self.assertTrue(gate_exit.get())
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_onclosing(self):
"""Test OnClosing node"""
# Testing OnClosing is tricky because OG is being destroyed when it happens -
# so we test by sending a custom event when the network is triggered
# and then checking whether we received that event
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
n = "omni.graph.action." + event_name
return carb.events.type_from_string(n)
got_event = [False]
def on_event(_):
got_event[0] = True
reg_event_name = registered_event_name("foo")
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
sub = message_bus.create_subscription_to_push_by_type(reg_event_name, on_event)
self.assertIsNotNone(sub)
controller = og.Controller()
keys = og.Controller.Keys
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnClosing", "omni.graph.action.OnClosing"),
("Send", "omni.graph.action.SendCustomEvent"),
],
keys.CONNECT: [("OnClosing.outputs:execOut", "Send.inputs:execIn")],
keys.SET_VALUES: [("Send.inputs:eventName", "foo"), ("Send.inputs:path", "Test Path")],
},
)
# evaluate once so that graph is in steady state
await controller.evaluate()
# close the stage
usd_context = omni.usd.get_context()
(result, _) = await usd_context.close_stage_async()
self.assertTrue(result)
# Check our handler was called
self.assertTrue(got_event[0])
async def test_onloaded(self):
"""Test OnLoaded node"""
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
n = "omni.graph.action." + event_name
return carb.events.type_from_string(n)
events = []
def on_event(e):
events.append(e.payload["!path"])
reg_event_name = registered_event_name("foo")
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
sub = message_bus.create_subscription_to_push_by_type(reg_event_name, on_event)
self.assertIsNotNone(sub)
controller = og.Controller()
keys = og.Controller.Keys
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("OnLoaded", "omni.graph.action.OnLoaded"),
("Send1", "omni.graph.action.SendCustomEvent"),
("Send2", "omni.graph.action.SendCustomEvent"),
],
keys.CONNECT: [
("OnLoaded.outputs:execOut", "Send1.inputs:execIn"),
("OnTick.outputs:tick", "Send2.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Send1.inputs:eventName", "foo"),
("Send2.inputs:eventName", "foo"),
("Send1.inputs:path", "Loaded"),
("Send2.inputs:path", "Tick"),
],
},
)
# evaluate once so that graph is in steady state
await controller.evaluate()
# Verify Loaded came before OnTick
self.assertListEqual(events, ["Loaded", "Tick"])
# ----------------------------------------------------------------------
async def test_active_latent(self):
"""exercise a latent node that executes downstream nodes while latent"""
# +--------+ +----------+finished+-------------+
# | OnTick+-->| TickN +-------->FinishCounter|
# +--------+ | | +-------------+
# | +-+
# +----------+ | +------------+ +------------+ +------------+
# +-----> TickCounter+----->TickCounter2+---->TickCounter3|
# tick +------------+ +------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("TickN", "omni.graph.action.TickN"),
("FinishCounter", "omni.graph.action.Counter"),
("TickCounter", "omni.graph.action.Counter"),
("TickCounter2", "omni.graph.action.Counter"),
("TickCounter3", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "TickN.inputs:execIn"),
("TickN.outputs:finished", "FinishCounter.inputs:execIn"),
("TickN.outputs:tick", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickCounter2.inputs:execIn"),
("TickCounter2.outputs:execOut", "TickCounter3.inputs:execIn"),
],
keys.SET_VALUES: [("TickN.inputs:duration", 3), ("OnTick.inputs:onlyPlayback", False)],
},
)
(_, _, finish_counter, tick_counter, _, tick_counter_3) = nodes
finish_counter_controller = og.Controller(og.Controller.attribute("outputs:count", finish_counter))
tick_counter_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter))
tick_counter_3_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter_3))
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
await controller.evaluate(graph)
self.assertEqual(tick_counter_controller.get(), 1)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_controller.get(), 2)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
# ----------------------------------------------------------------------
async def test_latent_chain(self):
"""exercise a chain of latent nodes"""
# +---------+ +-------+ tick +-------+ tick +-------+
# |OnImpulse+-->TickA +-------->TickB +-------->|LatentC|
# +---------+ +-----+-+ +------++ +-------+-----+
# | finish | finish |
# finish | +-------------+ | +-------------+ +-v----------+
# +->TickCounterA | +-->| TickCounterB| |TickCounterC|
# +-------------+ +-------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("LatentC", "omni.graph.action.TickN"),
("TickCounterA", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
("TickCounterC", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "TickB.inputs:execIn"),
("TickA.outputs:finished", "TickCounterA.inputs:execIn"),
("TickB.outputs:tick", "LatentC.inputs:execIn"),
("TickB.outputs:finished", "TickCounterB.inputs:execIn"),
("LatentC.outputs:finished", "TickCounterC.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("LatentC.inputs:duration", 2),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, _, _, _, tick_counter_a, tick_counter_b, tick_counter_c) = nodes
for _ in range(16):
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 2)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_c)), 4)
# ----------------------------------------------------------------------
async def test_latent_and_push(self):
"""exercise latent nodes in combination with stateful loop node"""
#
# +---------+ +-------+ tick +--------+ loopBody +-------+ +------------+
# |OnImpulse+-->|TickA +----------->ForLoop1++--------->|TickB +-+->|TickCounterB|
# +---------+ +----+--+ +--------++ +-------+ | +------------+
# | finish | |
# | | |
# | +--------------+ +v----------------+ +-v------------+
# +----->|FinishCounterA| |FinishLoopCounter| |FinishCounterB|
# +--------------+ +-----------------+ +--------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("ForLoop1", "omni.graph.action.ForLoop"),
("TickB", "omni.graph.action.TickN"),
("FinishLoopCounter", "omni.graph.action.Counter"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "ForLoop1.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("ForLoop1.outputs:loopBody", "TickB.inputs:execIn"),
("ForLoop1.outputs:finished", "FinishLoopCounter.inputs:execIn"),
("TickB.outputs:tick", "TickCounterB.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
],
keys.SET_VALUES: [
("ForLoop1.inputs:start", 0),
("ForLoop1.inputs:stop", 3),
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("OnImpulse.state:enableImpulse", True),
("OnImpulse.inputs:onlyPlayback", False),
],
},
)
(_, _, _, _, finish_loop_counter, finish_counter_a, finish_counter_b, tick_counter_b) = nodes
for _ in range(20):
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 12)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_b)), 6)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_loop_counter)), 2)
# ----------------------------------------------------------------------
async def test_latent_fan_out(self):
"""Test latent nodes when part of parallel evaluation"""
# +------------+
# +---->|TickCounterA|
# | +------------+
# |
# +--------++ +----------+
# +-> TickA +--->|FinishedA |
# | +---------+ +----------+
# +---------+ +-----------+ |
# |OnImpulse+-->|TickCounter+-+
# +---------+ +-----------+ |
# | +---------+ +----------+
# +>| TickB +--->|FinishedB |
# +--------++ +----------+
# |
# | +------------+
# +---->|TickCounterB|
# +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("TickCounter", "omni.graph.action.Counter"),
("TickCounterA", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickA.inputs:execIn"),
("TickCounter.outputs:execOut", "TickB.inputs:execIn"),
("TickA.outputs:tick", "TickCounterA.inputs:execIn"),
("TickB.outputs:tick", "TickCounterB.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, _, _, tick_counter, tick_counter_a, tick_counter_b, finish_counter_a, finish_counter_b) = nodes
def check_counts(c, a, b, f_a, f_b):
for node, expected in (
(tick_counter, c),
(tick_counter_a, a),
(tick_counter_b, b),
(finish_counter_a, f_a),
(finish_counter_b, f_b),
):
count = og.Controller.get(controller.attribute("outputs:count", node))
self.assertEqual(count, expected, node.get_prim_path())
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 0)
await controller.evaluate(graph)
check_counts(1, 1, 1, 0, 0)
await controller.evaluate(graph)
check_counts(1, 2, 2, 0, 0)
await controller.evaluate(graph)
check_counts(1, 2, 2, 1, 1)
# ----------------------------------------------------------------------
async def test_diamond_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a downstream node"""
# +--------++ +----------+
# +--> TickA +--->|FinishedA |---+
# | +---------+ +----------+ |
# +---------+ +-----------+ | | +------------+
# |OnImpulse+-->|TickCounter+-+ +-->|MergeCounter|
# +---------+ +-----------+ | | +------------+
# | +---------+ +----------+ |
# +-->| TickB +--->|FinishedB |--+
# +--------++ +----------+
# | +---------+
# +-->| TickC |
# +--------++
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickCounter", "omni.graph.action.Counter"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("TickC", "omni.graph.action.TickN"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("MergeCounter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickA.inputs:execIn"),
("TickCounter.outputs:execOut", "TickB.inputs:execIn"),
("TickCounter.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
("FinishCounterA.outputs:execOut", "MergeCounter.inputs:execIn"),
("FinishCounterB.outputs:execOut", "MergeCounter.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_counter, _, _, tick_c, finish_counter_a, finish_counter_b, merge_counter) = nodes
def check_counts(tc, f_a, f_b, mc, tick_c_count):
for node, expected in (
(tick_counter, tc),
(finish_counter_a, f_a),
(finish_counter_b, f_b),
(merge_counter, mc),
):
count = og.Controller.get(controller.attribute("outputs:count", node))
self.assertEqual(count, expected, node.get_prim_path())
self.assertEqual(tick_c.get_compute_count(), tick_c_count)
self.assertEqual(tick_c.get_compute_count(), 0)
# set up latent tickers
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 1)
# latent ticks
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 2)
# both branches complete
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# no count changes + no additional computes of tickC
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# ----------------------------------------------------------------------
async def test_diamond_latent_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a latent downstream node"""
# +--------++
# +--> TickA +--+
# | +---------+ |
# +---------+ | | +-------+ +-------+
# |OnImpulse+-->+ +-->|TickD +-+--->|CountF |
# +---------+ | | +-------+ | +-------+
# | +--------+ | +--->+-------+
# +-->| TickB +--+ |TickE |
# | +--------+ +--->+-------+
# | +--------+ |
# +-->| TickC +----------------+
# +--------+
# Note that when TickA triggers TickD into its latent state, TickB subsequently hits TickD. This
# subsequent evaluation is _transient_, meaning that TickB will not block on a new copy of TickD,
# because there is only one TickD and so there can be only one state (latent or not).
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("TickC", "omni.graph.action.TickN"),
("TickD", "omni.graph.action.TickN"),
("TickE", "omni.graph.action.TickN"),
("CountF", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickB.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "TickD.inputs:execIn"),
("TickB.outputs:finished", "TickD.inputs:execIn"),
("TickC.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "CountF.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 2),
("TickD.inputs:duration", 1),
("TickE.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_a, tick_b, tick_c, tick_d, tick_e, count_f) = nodes
def check_counts(i, ta, tb, tc, td, te):
# print(f"{[node.get_compute_count() for node, expected in ((tick_a, ta), (tick_b, tb), (tick_c, tc), (tick_d, td), (tick_e, te))]}")
for node, expected in ((tick_a, ta), (tick_b, tb), (tick_c, tc), (tick_d, td), (tick_e, te)):
self.assertEqual(node.get_compute_count(), expected, f"Check {i} for {node.get_prim_path()}")
# A, B, C, D, E
compute_counts = [
(1, 1, 1, 0, 0), # 0. fan out to trigger A, B, C into latent state
(2, 2, 2, 0, 0), # 1. A, B, C tick
(3, 3, 3, 2, 0), # 2. A, B end latent, D into latent via A or B, D ticks via A or B, C ticks
(3, 3, 4, 3, 2), # 3.
(3, 3, 4, 3, 3), # 4.
(3, 3, 4, 3, 3), # 5.
(3, 3, 4, 3, 3), # 6.
]
for i, cc in enumerate(compute_counts):
await controller.evaluate(graph)
check_counts(i, *cc)
# Verify that CountF has computed once, because the fan-in at TickD does NOT act like separate threads
self.assertEqual(count_f.get_compute_count(), 1)
async def test_om_63924(self):
"""Test OM-63924 bug is fixed"""
# The problem here was that if there was fan-in to a node which was
# computed once and then totally unwound before the other history was
# processed, there would never be a deferred activation and so the second
# compute would never happen. Instead we want to unwind only one history
# at a time to ensure each one is fully evaluated.
i = 2
class OnForEachEventPy:
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
nonlocal i
go = node.get_attribute("inputs:go")
go_val = og.Controller.get(go)
if not go_val:
return True
if i > 0:
og.Controller.set(
node.get_attribute("outputs:execOut"), og.ExecutionAttributeState.ENABLED_AND_PUSH
)
og.Controller.set(node.get_attribute("outputs:syncValue"), i)
i -= 1
return True
@staticmethod
def get_node_type() -> str:
return "omni.graph.test.OnForEachEventPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
node_type.add_input(
"inputs:go",
"bool",
False,
)
node_type.add_output("outputs:execOut", "execution", True)
node_type.add_output("outputs:syncValue", "uint64", True)
return True
og.register_node_type(OnForEachEventPy, 1)
class NoOpPy:
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
og.Controller.set(node.get_attribute("outputs:execOut"), og.ExecutionAttributeState.ENABLED)
return True
@staticmethod
def get_node_type() -> str:
return "omni.graph.test.NoOpPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
node_type.add_input(
"inputs:execIn",
"execution",
True,
)
node_type.add_output("outputs:execOut", "execution", True)
return True
og.register_node_type(NoOpPy, 1)
controller = og.Controller()
keys = og.Controller.Keys
(graph, (for_each, _, _, _, _, no_op_2), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("PostProcessDispatcher", "omni.graph.test.OnForEachEventPy"),
("TSA1", "omni.graph.action.SyncGate"),
("TSA0", "omni.graph.action.SyncGate"),
("TestSyncAccum", "omni.graph.action.SyncGate"),
("TestPrimBbox", "omni.graph.test.NoOpPy"),
("NoOpPy2", "omni.graph.test.NoOpPy"),
],
keys.CONNECT: [
("PostProcessDispatcher.outputs:execOut", "TSA0.inputs:execIn"),
("PostProcessDispatcher.outputs:execOut", "TSA1.inputs:execIn"),
("TSA1.outputs:execOut", "TestSyncAccum.inputs:execIn"),
("TSA0.outputs:execOut", "TestPrimBbox.inputs:execIn"),
("TestPrimBbox.outputs:execOut", "TestSyncAccum.inputs:execIn"),
("TestSyncAccum.outputs:execOut", "NoOpPy2.inputs:execIn"),
("PostProcessDispatcher.outputs:syncValue", "TSA1.inputs:syncValue"),
("PostProcessDispatcher.outputs:syncValue", "TSA0.inputs:syncValue"),
("PostProcessDispatcher.outputs:syncValue", "TestSyncAccum.inputs:syncValue"),
],
},
)
og.Controller.set(controller.attribute("inputs:go", for_each), True)
await controller.evaluate(graph)
# Verify the final sync gate triggered due to being computed twice
exec_out = og.Controller.get(controller.attribute("outputs:execOut", no_op_2))
self.assertEqual(exec_out, og.ExecutionAttributeState.ENABLED)
| 44,048 |
Python
| 46.364516 | 145 | 0.4706 |
omniverse-code/kit/exts/omni.graph.action/docs/ui_nodes.md
|
(ogn_ui_nodes)=
# UI Nodes
You may have seen the `omni.ui` extension that gives you the ability to create user interface elements through Python scripting. OmniGraph provides some nodes that can be used to do the same thing through an action graph.
These nodes provide an interface to the equivalent `omni.ui` script elements. The attributes of the nodes match the parameters you would pass to the script.
| Node | omni.ui Equivalent |
| --------------------------------------------------------------------------------------------------- | -------------------------------------------------------------- |
| {ref}`Button<GENERATED - Documentation _ognomni.graph.ui.Button>` | {py:class}`omni.ui.Button` |
| {ref}`ComboBox<GENERATED - Documentation _ognomni.graph.ui.ComboBox>` | {py:class}`omni.ui.ComboBox` |
| {ref}`OnWidgetClicked<GENERATED - Documentation _ognomni.graph.ui.OnWidgetClicked>` | {py:class}`omni.ui.Widget.call_mouse_pressed_fn` |
| {ref}`OnWidgetValueChanged<GENERATED - Documentation _ognomni.graph.ui.OnWidgetValueChanged>` | {py:class}`omni.ui.Widget.add_value_changed_fn` |
| {ref}`Slider<GENERATED - Documentation _ognomni.graph.ui.Slider>` | {py:class}`omni.ui.IntSlider` {py:class}`omni.ui.FloatSlider` |
| {ref}`VStack<GENERATED - Documentation _ognomni.graph.ui.VStack>` | {py:class}`omni.ui.VStack` |
## How To Use Them
The widgets that the UI nodes create are meant to exist only temporarily, so ensure that they are part of a graph that creates them only once and then destroys them once their utility has ended, as in the sketch below.
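
A minimal sketch of that pattern, authored from Python with the controller. It assumes the `Button` node's `inputs:create`, `inputs:parentWidgetPath` and `inputs:widgetIdentifier` attributes described in the node reference; the `inputs:text` attribute name and the viewport widget path are placeholders for illustration only.

```python
import omni.graph.core as og

keys = og.Controller.Keys
og.Controller.edit(
    # An execution (action) graph, since the UI nodes are event driven.
    {"graph_path": "/World/UIGraph", "evaluator_name": "execution"},
    {
        keys.CREATE_NODES: [
            ("OnLoaded", "omni.graph.action.OnLoaded"),
            ("Button", "omni.graph.ui.Button"),
        ],
        # OnLoaded fires exactly once when the graph is created, so the
        # widget is built once rather than on every tick.
        keys.CONNECT: [("OnLoaded.outputs:execOut", "Button.inputs:create")],
        keys.SET_VALUES: [
            ("Button.inputs:parentWidgetPath", "Viewport//Frame/ZStack[0]"),  # placeholder path
            ("Button.inputs:widgetIdentifier", "my_button"),
            ("Button.inputs:text", "Click Me"),  # assumed attribute name
        ],
    },
)
```

Destroying the widget when it is no longer needed is left to the graph author, for example by deleting the graph prim or by wiring a teardown event to whatever destroy input the widget node provides.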
| 2,019 |
Markdown
| 86.826083 | 221 | 0.520059 |
omniverse-code/kit/exts/omni.graph.action/docs/CHANGELOG.md
|
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.31.1] - 2023-02-10
### Fixed
- Add tests for AG fan-out with shared dependencies
## [1.31.0] - 2022-09-01
### Added
- SwitchToken, OnLoaded
## [1.30.2] - 2022-08-30
### Fixed
- Linting errors
## [1.30.1] - 2022-08-09
### Fixed
- Applied formatting to all of the Python files
## [1.30.0] - 2022-07-29
### Changed
- Add prim input to OnObjectChanged node
## [1.29.0] - 2022-07-29
### Added
- Support for relative image urls in UI widget nodes
## [1.28.1] - 2022-07-28
### Fixed
- Spurious error messages about 'Node compute request is ignored because XXX is not request-driven'
## [1.28.0] - 2022-07-26
### Added
- Placer node.
- Added ReadWidgetProperty node
## [1.27.0] - 2022-07-26
### Changed
- Removed internal Placers from the widget nodes.
## [1.26.1] - 2022-07-25
### Fixed
- Various UI nodes were rejecting valid exec inputs like ENABLED_AND_PUSH
## [1.26.0] - 2022-07-22
### Added
- 'style' input attributes for Button, Spacer and Stack nodes.
### Fixed
- WriteWidgetStyle was failing on styles containing hex values (e.g. for colors)
## [1.25.0] - 2022-07-20
### Added
- WriteWidgetStyle node
## [1.24.1] - 2022-07-21
### Changed
- Undo revert
## [1.24.0] - 2022-07-18
### Added
- Spacer node
### Changed
- VStack node:
- Removed all execution inputs except for create.
- Added support for OgnWriteWidgetProperty.
- inputs:parentWidgetPath is no longer optional.
## [1.23.0] - 2022-07-18
### Changed
- Reverted changes in 1.21.1
## [1.22.0] - 2022-07-15
### Added
- WriteWidgetProperty node
### Changed
- Removed all of Button node's execution inputs except for Create
- Removed Button node's 'iconSize' input.
- Modified Button node to work with WriteWidgetProperty
## [1.21.1] - 2022-07-15
### Changed
- Added test node TickN, modified tests
## [1.21.0] - 2022-07-14
### Changed
- Added +/- icons to Multigate and Sequence
## [1.20.0] - 2022-07-07
### Added
- Test for public API consistency
## [1.19.0] - 2022-07-04
### Changed
- OgnButton requires a parentWidgetPath. It no longer defaults to the current viewport.
- Each OgnButton instance can create multiples buttons. They no longer destroy the previous ones.
- widgetIdentifiers are now unique within GraphContext
## [1.18.1] - 2022-06-20
### Changed
- Optimized MultiSequence
## [1.18.0] - 2022-05-30
### Added
- OnClosing
## [1.17.1] - 2022-05-23
### Changed
- Changed VStack ui name to Stack
## [1.17.0] - 2022-05-24
### Changed
- Converted ForEach node from Python to C++
- Removed OnWidgetDoubleClicked
## [1.16.1] - 2022-05-23
### Changed
- Converted Counter node from Python to C++
## [1.16.0] - 2022-05-21
### Changed
- Replaced OnGraphInitialize with Once
## [1.15.1] - 2022-05-20
### Changed
- Added direction input to VStack node to allow objects to stack in width, height & depth directions.
- Button node uses styles to select icons rather than mouse functions.
## [1.15.0] - 2022-05-19
### Added
- OnGraphInitialize - triggers when the graph is created
### Changed
- OnStageEvent - removed non-functional events
## [1.14.1] - 2022-05-19
### Fixed
- Fixed OgnOnWidgetValueChanged output type resolution
## [1.14.0] - 2022-05-17
### Changed
- Added '(BETA)' to the ui_names of the new UI nodes.
## [1.13.0] - 2022-05-16
### Added
- Added Sequence node with dynamic outputs named OgnMultisequence
## [1.12.0] - 2022-05-11
### Added
- Node definitions for UI creation and manipulation
- Documentation on how to use the new UI nodes
- Dependencies on extensions omni.ui_query and omni.kit.window.filepicker(optional)
## [1.11.3] - 2022-04-12
### Fixed
- OnCustomEvent when onlyPlayback=true
- Remove state attrib from PrintText
## [1.11.2] - 2022-04-08
### Added
- Added absoluteSimTime output attribute to the OnTick node
## [1.11.1] - 2022-03-16
### Fixed
- OnStageEvent Animation Stop event when only-on-playback is true
## [1.11.0] - 2022-03-10
### Changed
- Removed _outputs::shiftOut_, _outputs::ctrlOut_, _outputs::altOut_ from the _OnKeyboardInput_ node.
- Added _inputs::shiftIn_, _inputs::ctrlIn_, _inputs::altIn_ to the _OnKeyboardInput_ node.
- Added support for key modifiers to the _OnKeyboardInput_ node.
## [1.10.1] - 2022-03-09
### Changed
- Made all input attributes of all event source nodes literalOnly
## [1.10.0] - 2022-02-24
### Added
- added SyncGate node
## [1.9.1] - 2022-02-14
### Fixed
- Added an additional extension-enabled check to avoid the "omni.graph.ui not enabled" error
## [1.9.0] - 2022-02-04
### Added
- added SetPrimRelationship node
## [1.8.0] - 2022-02-04
### Modified
- Several event nodes now have _inputs:onlyPlayback_ attributes to control when they are active. The default is enabled, which means these nodes will only operate while playback is active.
### Fixed
- Category for Counter
## [1.7.0] - 2022-01-27
### Added
- Added SetPrimActive node
## [1.6.0] - 2022-01-27
### Added
- Added OnMouseInput node
### Modified
- Changed OnGamepadInput to use SubscriptionToInputEvents instead
- Changed OnKeyboardInput to use SubscriptionToInputEvents instead
## [1.5.0] - 2022-01-25
### Added
- Added OnGamepadInput node
## [1.4.5] - 2022-01-24
### Fixed
- categories for several nodes
## [1.4.4] - 2022-01-14
### Added
- Added car customizer tutorial
## [1.4.2] - 2022-01-05
### Modified
- Categories added to all nodes
## [1.4.1] - 2021-12-20
### Modified
- _GetLookAtRotation_ moved to _omni.graph.nodes_
## [1.4.0] - 2021-12-10
### Modified
- _OnStageEvent_ handles new stage events
## [1.3.0] - 2021-11-22
### Modified
- _OnKeyboardInput_ to use allowedTokens for input key
- _OnStageEvent_ bugfix to avoid spurious error messages on shutdown
## [1.2.0] - 2021-11-10
### Modified
- _OnKeyboardInput_, _OnCustomEvent_ to use _INode::requestCompute()_
## [1.1.0] - 2021-11-04
### Modified
- _OnImpulseEvent_ to use _INode::requestCompute()_
## [1.0.0] - 2021-05-10
### Initial Version
| 6,104 |
Markdown
| 25.201717 | 187 | 0.693971 |
omniverse-code/kit/exts/omni.graph.action/docs/README.md
|
# OmniGraph Action Graphs
## Introduction to Action Graphs
Provides visual-programming graphs to help designers bring their Omniverse creations to life. Action Graphs are
triggered by events and can execute nodes which modify the stage.
## The Action Graph Extension
Contains nodes which work only with Action Graphs. Compute nodes from other extensions, such as omni.graph.nodes,
can also be used with Action Graphs.
| 422 |
Markdown
| 34.249997 | 119 | 0.808057 |
omniverse-code/kit/exts/omni.graph.action/docs/index.rst
|
.. _ogn_omni_graph_action:
OmniGraph Action Graph
######################
.. tabularcolumns:: |L|R|
.. csv-table::
:width: 100%
**Extension**: omni.graph.action,**Documentation Generated**: |today|
.. toctree::
:maxdepth: 1
CHANGELOG
This extension is a collection of functionality required for OmniGraph Action Graphs.
.. toctree::
:maxdepth: 2
:caption: Contents
Overview of Action Graphs<Overview>
Hands-on Introduction to Action Graphs<https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_omnigraph/quickstart.html>
Action Graph Car Customizer Tutorial<https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_omnigraph/car_customizer.html>
Building UI With Action Graph<ui_nodes>
For more comprehensive examples targeted at explaining the use of OmniGraph features in detail see
:ref:`ogn_user_guide`
.. note::
Action Graphs are in an early development state
| 937 |
reStructuredText
| 25.799999 | 140 | 0.731057 |
omniverse-code/kit/exts/omni.graph.action/docs/Overview.md
|
(ogn_omni_graph_action_overview)=
```{csv-table}
**Extension**: omni.graph.action,**Documentation Generated**: {sub-ref}`today`
```
This extension is a collection of functionality required for OmniGraph Action Graphs.
```{note}
Action Graphs are in an early development state
```
# Action Graph Overview
For a hands-on introduction to OmniGraph Action Graphs see
[Action Graph Quickstart](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_omnigraph/quickstart.html)
For more comprehensive and thorough documentation on various OmniGraph features see {ref}`ogn_user_guide`
Action Graphs are composed of any number of separate chains of nodes, like deformer graphs. However, there are important differences which make Action Graphs better suited to particular applications.
## Event Sources
Action Graphs are *event-driven*, which means that each chain of nodes must start with an *Event Source* node. Each event source node can be thought of as an entry point into the graph.
*Event Source* nodes are named with an *On* prefix, never have an *execution* input attribute, and always have at least one output *execution* attribute.
| Event Source Nodes |
| ------------------------------------------------------------------------------------------ |
| {ref}`On Keyboard Input <GENERATED - Documentation _ognomni.graph.action.OnKeyboardInput>` |
| {ref}`On Tick <GENERATED - Documentation _ognomni.graph.action.OnTick>` |
| {ref}`On Playback Tick <GENERATED - Documentation _ognomni.graph.action.OnPlaybackTick>` |
| {ref}`On Impulse Event <GENERATED - Documentation _ognomni.graph.action.OnImpulseEvent>` |
| {ref}`On Object Change <GENERATED - Documentation _ognomni.graph.action.OnObjectChange>` |
| {ref}`On Custom Event <GENERATED - Documentation _ognomni.graph.action.OnCustomEvent>` |
## Execution Attributes
Action graphs make use of *execution*-type attributes.
The *execution* evaluator works by following *execution* connections downstream and computing the nodes it encounters until there are no more downstream connections to follow; the entire chain is executed to completion. When there is no downstream node, the current execution terminates and the next node is popped off the *execution stack*.
Note that if there is more than one downstream connection from an *execution* attribute, each path will be followed in an undetermined order. Multiple downstream chains can be executed in a fixed order either by chaining the end of one to the start of the other, or by using the {ref}`Sequence <GENERATED - Documentation _ognomni.graph.action.Sequence>` node.
The value of an *execution* attribute tells the evaluator what the next step should be in the chain. It can be one of:
| Value            | Description                                                                                   |
| ---------------- | ------------------------------------------------------------------------------------------- |
| DISABLED | Do not continue from this attribute. |
| ENABLED | Continue downstream from this attribute. |
| ENABLED_AND_PUSH | Save the current node on the *execution stack* and continue downstream from this attribute. |
| LATENT_PUSH | Save the current node as it performs some asynchronous operation |
| LATENT_FINISH | Finish the asynchronous operation and continue downstream from this attribute. |
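For example, a Python node's compute typically reports one of these states through its execution output. A minimal sketch, assuming an `.ogn`-generated database whose execution output is named `execOut` and a hypothetical boolean input `condition`:
```python
import omni.graph.core as og


def compute(db) -> bool:
    """Minimal sketch: choose whether evaluation continues downstream."""
    if db.inputs.condition:  # hypothetical boolean input
        # Continue downstream from this attribute
        db.outputs.execOut = og.ExecutionAttributeState.ENABLED
    else:
        # Do not continue from this attribute
        db.outputs.execOut = og.ExecutionAttributeState.DISABLED
    return True
```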
## Flow Control
Many Action Graphs need to do different things depending on some state. In a Python script you would use an *if* statement or a *while* loop to accomplish this; similarly, Action Graph provides nodes with this branching functionality. Flow control nodes have more than one *execution* output attribute, which is used to branch the evaluation flow; a wiring sketch follows the table below.
| Flow Control Nodes |
| --------------------------------------------------------------------------- |
| {ref}`Branch <GENERATED - Documentation _ognomni.graph.action.Branch>` |
| {ref}`ForEach <GENERATED - Documentation _ognomni.graph.action.ForEach>` |
| {ref}`For Loop <GENERATED - Documentation _ognomni.graph.action.ForLoop>` |
| {ref}`Flip Flop <GENERATED - Documentation _ognomni.graph.action.FlipFlop>` |
| {ref}`Gate <GENERATED - Documentation _ognomni.graph.action.Gate>` |
| {ref}`Sequence <GENERATED - Documentation _ognomni.graph.action.Sequence>` |
| {ref}`Delay <GENERATED - Documentation _ognomni.graph.action.Delay>` |
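A minimal wiring sketch, assuming an execution graph already exists at `/World/ActionGraph` and assuming the conventional attribute names on these nodes (`inputs:execIn`, `inputs:condition`, `outputs:execTrue`, `outputs:execFalse`), which are not verified here; the Counter targets are just example downstream nodes:
```python
import omni.graph.core as og

keys = og.Controller.Keys
og.Controller.edit(
    "/World/ActionGraph",
    {
        keys.CREATE_NODES: [
            ("OnImpulse", "omni.graph.action.OnImpulseEvent"),
            ("Branch", "omni.graph.action.Branch"),
            ("CountTrue", "omni.graph.action.Counter"),
            ("CountFalse", "omni.graph.action.Counter"),
        ],
        keys.SET_VALUES: [("Branch.inputs:condition", True)],
        keys.CONNECT: [
            ("OnImpulse.outputs:execOut", "Branch.inputs:execIn"),
            # Only one of these two paths is followed per evaluation
            ("Branch.outputs:execTrue", "CountTrue.inputs:execIn"),
            ("Branch.outputs:execFalse", "CountFalse.inputs:execIn"),
        ],
    },
)
```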
| 4,622 |
Markdown
| 66.985293 | 359 | 0.665729 |
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop_test/__init__.py
|
from ._renderer_cuda_interop_test import *
# Cached interface instance pointer
def get_renderer_cuda_interop_test_interface() -> IRendererCudaInteropTest:
if not hasattr(get_renderer_cuda_interop_test_interface, "renderer_cuda_interop_test"):
get_renderer_cuda_interop_test_interface.renderer_cuda_interop_test = acquire_renderer_cuda_interop_test_interface()
return get_renderer_cuda_interop_test_interface.renderer_cuda_interop_test
| 453 |
Python
| 49.444439 | 124 | 0.785872 |
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/__init__.py
|
from ._renderer_cuda_interop import *
# Cached interface instance pointer
def get_renderer_cuda_interop_interface() -> IRendererCudaInterop:
"""Returns cached :class:`omni.kit.renderer.IRendererCudaInterop` interface"""
if not hasattr(get_renderer_cuda_interop_interface, "renderer_cuda_interop"):
        get_renderer_cuda_interop_interface.renderer_cuda_interop = acquire_renderer_cuda_interop_interface()
    return get_renderer_cuda_interop_interface.renderer_cuda_interop
| 475 |
Python
| 42.272723 | 96 | 0.772632 |
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/_renderer_cuda_interop.pyi
|
"""
This module contains bindings to C++ omni::kit::renderer::IRendererCudaInterop interface, core C++ part of Omniverse Kit.
>>> import omni.kit.renderer.cuda_interop
>>> e = omni.kit.renderer.cuda_interop.get_renderer_cuda_interop_interface()
"""
from __future__ import annotations
import omni.kit.renderer.cuda_interop._renderer_cuda_interop
import typing
__all__ = [
"IRendererCudaInterop",
"acquire_renderer_cuda_interop_interface",
"release_renderer_cuda_interop_interface"
]
class IRendererCudaInterop():
pass
def acquire_renderer_cuda_interop_interface(plugin_name: str = None, library_path: str = None) -> IRendererCudaInterop:
pass
def release_renderer_cuda_interop_interface(arg0: IRendererCudaInterop) -> None:
pass
| 788 |
unknown
| 31.874999 | 129 | 0.717005 |
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/tests/__init__.py
|
from .test_renderer_cuda_interop import *
| 42 |
Python
| 20.49999 | 41 | 0.785714 |
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/tests/test_renderer_cuda_interop.py
|
import inspect
import pathlib
import carb
import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test
import omni.kit.renderer.cuda_interop_test
class RendererCudaInteropTest(omni.kit.test.AsyncTestCase):
async def setUp(self):
self._settings = carb.settings.acquire_settings_interface()
self._app_window_factory = omni.appwindow.acquire_app_window_factory_interface()
self._renderer = omni.kit.renderer.bind.acquire_renderer_interface()
self._renderer_cuda_interop_test = omni.kit.renderer.cuda_interop_test.acquire_renderer_cuda_interop_test_interface()
self._renderer.startup()
self._renderer_cuda_interop_test.startup()
def __test_name(self) -> str:
return f"{self.__module__}.{self.__class__.__name__}.{inspect.stack()[2][3]}"
async def tearDown(self):
self._renderer_cuda_interop_test.shutdown()
self._renderer.shutdown()
self._renderer_cuda_interop_test = None
self._renderer = None
self._app_window_factory = None
self._settings = None
async def test_1_render_cuda_interop_test(self):
app_window = self._app_window_factory.create_window_from_settings()
app_window.startup_with_desc(
title="Renderer test OS window",
width=16,
height=16,
x=omni.appwindow.POSITION_CENTERED,
y=omni.appwindow.POSITION_CENTERED,
decorations=True,
resize=True,
always_on_top=False,
scale_to_monitor=False,
dpi_scale_override=-1.0
)
self._renderer.attach_app_window(app_window)
self._app_window_factory.set_default_window(app_window)
TEST_COLOR = (1, 2, 3, 255)
test_color_unit = tuple(c / 255.0 for c in TEST_COLOR)
self._renderer.set_clear_color(app_window, test_color_unit)
self._renderer_cuda_interop_test.startup_resources_for_app_window(app_window)
self._renderer_cuda_interop_test.setup_simple_comparison_for_app_window(app_window, TEST_COLOR[0], TEST_COLOR[1], TEST_COLOR[2], TEST_COLOR[3])
test_name = self.__test_name()
for _ in range(3):
await omni.kit.app.get_app().next_update_async()
self._renderer_cuda_interop_test.shutdown_resources_for_app_window(app_window)
self._app_window_factory.set_default_window(None)
self._renderer.detach_app_window(app_window)
app_window.shutdown()
app_window = None
| 2,522 |
Python
| 35.042857 | 151 | 0.647898 |
omniverse-code/kit/exts/omni.kit.window.audioplayer/PACKAGE-LICENSES/omni.kit.window.audioplayer-LICENSE.md
|
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
| 412 |
Markdown
| 57.999992 | 74 | 0.839806 |
omniverse-code/kit/exts/omni.kit.window.audioplayer/config/extension.toml
|
[package]
title = "Kit Audio Player Window"
category = "Audio"
version = "1.0.1"
description = "A simple audio player window"
detailedDescription = """This adds a window for playing audio assets.
This also adds an option to the content browser to play audio assets in this
audio player.
"""
preview_image = "data/preview.png"
authors = ["NVIDIA"]
keywords = ["audio", "playback"]
[dependencies]
"omni.audioplayer" = {}
"omni.ui" = {}
"omni.kit.window.content_browser" = { optional=true }
"omni.kit.window.filepicker" = {}
"omni.kit.menu.utils" = {}
[[python.module]]
name = "omni.kit.window.audioplayer"
[[test]]
unreliable = true
args = [
# Use the null device backend so we don't scare devs by playing audio.
"--/audio/deviceBackend=null",
# Needed for UI testing
"--/app/menu/legacy_mode=false",
]
dependencies = [
"omni.kit.mainwindow",
"omni.kit.ui_test",
]
stdoutFailPatterns.exclude = [
"*" # I don't want these but OmniUiTest forces me to use them
]
| 993 |
TOML
| 22.116279 | 76 | 0.682779 |
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/__init__.py
|
from .audio_player_window import *
| 35 |
Python
| 16.999992 | 34 | 0.771429 |
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/audio_player_window.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb.settings
import carb.dictionary
import omni.audioplayer
import omni.kit.ui
import omni.ui
import threading
import time
import re
import asyncio
import enum
from typing import Callable
from omni.kit.window.filepicker import FilePickerDialog
PERSISTENT_SETTINGS_PREFIX = "/persistent"
class EndReason(enum.Enum):
# sound finished naturally
FINISHED = 0,
# sound was explicitly stopped
STOPPED = 1,
# seeked to a new location in the sound (causes an end callback)
SEEK = 2,
# the previous sound ended because another one is being played
QUEUED_NEW_SOUND = 3,
class AudioPlayerWindowExtension(omni.ext.IExt):
"""Audio Player Window Extension"""
class FieldModel(omni.ui.AbstractValueModel):
def __init__(self, end_edit_callback):
super(AudioPlayerWindowExtension.FieldModel, self).__init__()
self._end_edit_callback = end_edit_callback
self._value = ""
def get_value_as_string(self):
return self._value
def begin_edit(self):
pass
def set_value(self, value):
self._value = value
self._value_changed()
def end_edit(self):
self._end_edit_callback(self._value)
class SliderModel(omni.ui.AbstractValueModel):
def __init__(self, update_callback, end_edit_callback):
super(AudioPlayerWindowExtension.SliderModel, self).__init__()
self._update_callback = update_callback
self._end_edit_callback = end_edit_callback
self._value = 0
def get_value_as_int(self): # pragma: no cover
return int(self._value)
def get_value_as_float(self): # pragma: no cover
return float(self._value)
def begin_edit(self): # pragma: no cover
pass
def set_value(self, value): # pragma: no cover
self._value = value
self._value_changed()
self._update_callback(self._value)
def end_edit(self): # pragma: no cover
self._end_edit_callback(self._value)
def _on_file_pick(self, dialog: FilePickerDialog, filename: str, dirname: str): # pragma: no cover
path = ""
if dirname:
path = f"{dirname}/{filename}"
elif filename:
path = filename
dialog.hide()
self._file_field.model.set_value(path)
# this has to be called manually because set_value doesn't do it
self._file_field_end_edit(path)
def _choose_file_clicked(self): # pragma: no cover
dialog = FilePickerDialog(
"Select File",
apply_button_label="Select",
click_apply_handler=lambda filename, dirname: self._on_file_pick(dialog, filename, dirname),
)
dialog.show()
def _set_pause_button(self): # pragma: no cover
self._play_button.set_style({"image_url": "resources/glyphs/timeline_pause.svg"})
def _set_play_button(self): # pragma: no cover
self._play_button.set_style({"image_url": "resources/glyphs/timeline_play.svg"})
def _timeline_str(self, time): # pragma: no cover
sec = ":{:02.0f}".format(time % 60)
if time > 60.0 * 60.0:
return "{:1.0f}".format(time // (60 * 60)) + ":{:02.0f}".format((time // 60) % 60) + sec
else:
return "{:1.0f}".format(time // 60) + sec
def _timeline_ticker(self): # pragma: no cover
if not self._playing:
return
time = self._player.get_play_cursor()
self._timeline_cursor_label.text = self._timeline_str(time)
self._timeline_slider.model.set_value(time * self._timeline_slider_scale)
# if the window was closed, stop the player
if not self._window.visible:
self._end_reason = EndReason.STOPPED
self._player.stop_sound()
        # Timer.start() returns None, so keep the Timer object so on_shutdown() can cancel it
        self._ticker = threading.Timer(0.25, self._timeline_ticker)
        self._ticker.start()
def _loading_ticker(self):
labels = {0: "Loading", 1: "Loading.", 2: "Loading..", 3: "Loading..."}
if not self._loading:
self._loading_label.text = ""
return
self._loading_label.text = labels[self._loading_counter % 4]
self._loading_counter += 1
        self._loading_timer = threading.Timer(0.25, self._loading_ticker)
        self._loading_timer.start()
def _play_sound(self, time):
self._loading = True
self._player.play_sound(
self._file_field.model.get_value_as_string(), time
)
def _close_error_window(self): # pragma: no cover
self._error_window.visible = False
def _set_play_cursor(self, time): # pragma: no cover
self._end_reason = EndReason.SEEK
self._player.set_play_cursor(time)
def _file_loaded(self, success): # pragma: no cover
self._loading = False
if not success:
self._playing = False
self._set_play_button()
error_text = "Loading failed"
file_name = self._file_field.model.get_value_as_string()
            if re.search("^.*\\.(m4a|aac)$", file_name):
                error_text = (
                    f"Failed to load file '{file_name}': codec not supported - only Vorbis, FLAC and WAVE are supported"
                )
            else:
                error_text = f"Failed to load file '{file_name}': codec not supported (only Vorbis, FLAC, MP3 and WAVE are supported), file does not exist, or the file is corrupted"
self._error_window = omni.ui.Window(
"Audio Player Error", width=400, height=0, flags=omni.ui.WINDOW_FLAGS_NO_DOCKING
)
with self._error_window.frame:
with omni.ui.VStack():
with omni.ui.HStack():
omni.ui.Spacer()
self._error_window_label = omni.ui.Label(
error_text, word_wrap=True, width=380, alignment=omni.ui.Alignment.CENTER
)
omni.ui.Spacer()
with omni.ui.HStack():
omni.ui.Spacer()
self._error_window_ok_button = omni.ui.Button(
width=64, height=32, clicked_fn=self._close_error_window, text="ok"
)
omni.ui.Spacer()
self._waveform_image_provider.set_bytes_data([0, 0, 0, 0], [1, 1])
return
if self._new_file:
width = 2048
height = 64
raw_image = self._player.draw_waveform(width, height, [0.89, 0.54, 0.14, 1.0], [0.0, 0.0, 0.0, 0.0])
self._waveform_image_provider.set_bytes_data(raw_image, [width, height])
self._new_file = False
self._timeline_end_label.text = self._timeline_str(self._player.get_sound_length())
self._sound_length = self._player.get_sound_length()
self._timeline_slider_scale = 1.0 / self._sound_length
# set the timeline ticker going
self._timeline_ticker()
# set this back to default
self._end_reason = EndReason.FINISHED
def _play_finished(self): # pragma: no cover
if self._end_reason != EndReason.SEEK and self._end_reason != EndReason.QUEUED_NEW_SOUND:
self._playing = False
# set the slider to finished
self._timeline_cursor_label.text = self._timeline_str(0)
self._timeline_slider.model.set_value(0.0)
if self._end_reason == EndReason.FINISHED or self._end_reason == EndReason.STOPPED:
self._set_play_button()
if self._end_reason == EndReason.FINISHED and self._settings.get_as_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop"):
self._window.visible = False
def _play_clicked(self): # pragma: no cover
if self._loading:
return
if self._playing:
if self._paused:
self._player.unpause_sound()
self._set_pause_button()
self._paused = False
else:
self._player.pause_sound()
self._set_play_button()
self._paused = True
return
self._playing = True
self._paused = False
self._load_result_label.text = ""
self._loading_ticker()
self._set_pause_button()
self._play_sound(self._timeline_slider.model.get_value_as_float() * self._sound_length)
def _file_field_end_edit(self, value):
self._loading = True
self._new_file = True
self._load_result_label.text = ""
self._loading_ticker()
self._stop_clicked()
self._player.load_sound(self._file_field.model.get_value_as_string())
def _stop_clicked(self): # pragma: no cover
if self._loading:
return
self._end_reason = EndReason.STOPPED
self._player.stop_sound()
self._playing = False
self._paused = False
def _slider_end_edit(self, value): # pragma: no cover
if self._loading:
return
if not self._playing:
return
self._set_play_cursor(value * self._sound_length)
def _slider_changed(self, value): # pragma: no cover
if not self._playing and not self._loading:
self._timeline_cursor_label.text = self._timeline_str(value * self._sound_length)
def open_window(self):
"""
Make the window become visible
Args:
No arguments
Returns:
No return value
"""
self._window.visible = True
def open_window_and_play(self, path): # pragma: no cover
"""
Make the window become visible then begin playing a file
Args:
path: The file to begin playing
Returns:
No return value
"""
self._playing = True
        self._loading = True
self._paused = False
self._new_file = True
self._window.visible = True
self._load_result_label.text = ""
self._loading_ticker()
self._set_pause_button()
self._end_reason = EndReason.QUEUED_NEW_SOUND
self._file_field.model.set_value(path)
self._play_sound(0.0)
def _menu_callback(self, a, b):
self._window.visible = not self._window.visible
def _on_menu_click(self, menu, value): # pragma: no cover
if self._content_window is None:
return
protocol = self._content_window.get_selected_icon_protocol()
path = self._content_window.get_selected_icon_path()
if not path.startswith(protocol):
path = protocol + path
self.open_window_and_play(path)
def _on_menu_check(self, url):
        return re.search("^.*\\.(wav|wave|ogg|oga|flac|fla|mp3|m4a|spx|opus|adpcm)$", url) is not None
def _on_browser_click(self, menu, value): # pragma: no cover
if self._content_browser is None:
return
# protocol = self._content_browser.get_selected_icon_protocol()
# path = self._content_browser.get_selected_icon_path()
# if not path.startswith(protocol):
# path = protocol + path
self.open_window_and_play(value)
def _on_content_browser_load(self): # pragma: no cover
import omni.kit.window.content_browser
self._content_browser = omni.kit.window.content_browser.get_content_window()
if self._content_browser is not None:
self._content_browser_entry = self._content_browser.add_context_menu(
"Play Audio", "audio_play.svg", self._on_browser_click, self._on_menu_check
)
def _on_content_browser_unload(self): # pragma: no cover
if self._content_browser is not None:
self._content_browser.delete_context_menu("Play Audio")
self._content_browser_entry = None
self._content_browser = None
def _on_player_event(self, event):
if event.type == int(omni.audioplayer.CallbackType.LOADED):
success = event.payload["success"]
self._file_loaded(success)
elif event.type == int(omni.audioplayer.CallbackType.ENDED):
self._play_finished()
else:
print("unrecognized type " + str(event.type))
def on_startup(self):
self._content_browser = None
self._hooks = []
manager = omni.kit.app.get_app().get_extension_manager()
# current content window
self._hooks.append(
manager.subscribe_to_extension_enable(
on_enable_fn=lambda _: self._on_content_browser_load(),
on_disable_fn=lambda _: self._on_content_browser_unload(),
ext_name="omni.kit.window.content_browser",
hook_name="omni.kit.window.audioplayer omni.kit.window.content_browser listener",
)
)
self._loading_counter = 0
self._ticker = None
self._loading = False
self._end_reason = EndReason.FINISHED
self._new_file = True
self._sound_length = 0
self._timeline_slider_scale = 0
self._file = ""
self._playing = False
self._paused = False
self._player = omni.audioplayer.create_audio_player()
self._sub = self._player.get_event_stream().create_subscription_to_pop(self._on_player_event)
self._window = omni.ui.Window("Audio Player", width=600, height=200)
self._settings = carb.settings.get_settings()
self._settings.set_default_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", False)
with self._window.frame:
with omni.ui.VStack(height=0, spacing=8):
# file dialogue
with omni.ui.HStack():
omni.ui.Button(
width=32,
height=32,
clicked_fn=self._choose_file_clicked,
style={"image_url": "resources/glyphs/folder.svg"},
)
self._file_field_model = AudioPlayerWindowExtension.FieldModel(self._file_field_end_edit)
self._file_field = omni.ui.StringField(self._file_field_model, height=32)
# timeline slider
with omni.ui.HStack(height=64):
self._timeline_cursor_label = omni.ui.Label("0:00", width=25)
omni.ui.Label(" / ", width=10)
self._timeline_end_label = omni.ui.Label("0:00", width=25)
self._timeline_slider_model = AudioPlayerWindowExtension.SliderModel(
self._slider_changed, self._slider_end_edit
)
with omni.ui.ZStack():
self._waveform_image_provider = omni.ui.ByteImageProvider()
self._waveform_image = omni.ui.ImageWithProvider(
self._waveform_image_provider,
width=omni.ui.Percent(100),
height=omni.ui.Percent(100),
fill_policy=omni.ui.IwpFillPolicy.IWP_STRETCH,
)
with omni.ui.VStack():
omni.ui.Spacer()
self._timeline_slider = omni.ui.FloatSlider(
self._timeline_slider_model,
height=0,
style={
"color": 0x00FFFFFF,
"background_color": 0x00000000,
"draw_mode": omni.ui.SliderDrawMode.HANDLE,
"font_size": 22,
},
)
omni.ui.Spacer()
# buttons
with omni.ui.HStack():
with omni.ui.ZStack():
omni.ui.Spacer()
self._load_result_label = omni.ui.Label(
"", alignment=omni.ui.Alignment.CENTER, style={"color": 0xFF0000FF}
)
self._play_button = omni.ui.Button(
width=32,
height=32,
clicked_fn=self._play_clicked,
style={"image_url": "resources/glyphs/timeline_play.svg"},
)
omni.ui.Button(
width=32,
height=32,
clicked_fn=self._stop_clicked,
style={"image_url": "resources/glyphs/timeline_stop.svg"},
)
with omni.ui.ZStack():
omni.ui.Spacer()
self._loading_label = omni.ui.Label("", alignment=omni.ui.Alignment.CENTER)
with omni.ui.HStack(alignment=omni.ui.Alignment.LEFT, width=100):
omni.ui.Label("Close on Stop", alignment=omni.ui.Alignment.LEFT)
omni.ui.Spacer()
self._auto_close_on_stop = omni.ui.CheckBox(alignment=omni.ui.Alignment.LEFT)
omni.ui.Spacer()
self._auto_close_on_stop.model.set_value(
self._settings.get_as_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop")
)
self._dict = carb.dictionary.get_dictionary()
self._auto_close_on_stop.model.add_value_changed_fn(
lambda a, b=self._settings: b.set_bool(
PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", a.get_value_as_bool()
)
)
def on_change(item, event_type): # pragma: no cover
self._auto_close_on_stop.model.set_value(self._dict.get(item))
self._subscription = self._settings.subscribe_to_node_change_events(
PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", on_change
)
# add a callback to open the window
# FIXME: disabled until the bugs are worked out
self._menuEntry = omni.kit.ui.get_editor_menu().add_item("Window/Audio Player", self._menu_callback)
self._window.visible = False
def on_shutdown(self): # pragma: no cover
self._end_reason = EndReason.STOPPED
self._player.stop_sound()
        if self._ticker is not None:
self._ticker.cancel()
self._settings.unsubscribe_to_change_events(self._subscription)
self._subscription = None
# run the unload function to avoid breaking the extension when it reloads
self._on_content_browser_unload()
# remove the subscription before the player to avoid events with a dead player
self._sub = None
self._player = None
self._window = None
self._menuEntry = None
        self._content_browser_entry = None
| 19,473 |
Python
| 37.259332 | 179 | 0.559185 |
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/tests/__init__.py
|
from .test_audio_player import * # pragma: no cover
| 53 |
Python
| 25.999987 | 52 | 0.716981 |
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/tests/test_audio_player.py
|
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.app
import omni.kit.test
import omni.kit.ui_test
import omni.ui as ui
import omni.usd
import omni.timeline
import carb.tokens
import omni.usd.audio
from omni.ui.tests.test_base import OmniUiTest
import pathlib
import asyncio
class TestAudioPlayerWindow(OmniUiTest): # pragma: no cover
async def _dock_window(self):
await self.docked_test_window(
window=self._win.window,
width=600,
height=200)
def _dump_ui_tree(self, root):
print("DUMP UI TREE START")
#windows = omni.ui.Workspace.get_windows()
#children = [windows[0].frame]
children = [root.frame]
print(str(dir(root.frame)))
def recurse(children, path=""):
for c in children:
name = path + "/" + type(c).__name__
print(name)
if isinstance(c, omni.ui.ComboBox):
print(str(dir(c)))
recurse(omni.ui.Inspector.get_children(c), name)
recurse(children)
print("DUMP UI TREE END")
async def setUp(self):
await super().setUp()
extension_path = carb.tokens.get_tokens_interface().resolve("${omni.kit.window.audioplayer}")
self._test_path = pathlib.Path(extension_path).joinpath("data").joinpath("tests").absolute()
self._golden_img_dir = self._test_path.joinpath("golden")
# open the dropdown
window_menu = omni.kit.ui_test.get_menubar().find_menu("Window")
self.assertIsNotNone(window_menu)
await window_menu.click()
# click the audioplayer option to open it
player_menu = omni.kit.ui_test.get_menubar().find_menu("Audio Player")
self.assertIsNotNone(player_menu)
await player_menu.click()
self._win = omni.kit.ui_test.find("Audio Player")
self.assertIsNotNone(self._win)
self._file_name_textbox = self._win.find("**/StringField[*]")
self.assertIsNotNone(self._file_name_textbox)
async def _test_just_opened(self):
await self._dock_window()
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_just_opened.png")
async def _test_load_file(self):
await self._file_name_textbox.click()
await self._file_name_textbox.input(str(self._test_path / "1hz.oga"))
await asyncio.sleep(1.0)
# delete the text in the textbox so we'll have something constant
# for the image comparison
await self._file_name_textbox.double_click()
await omni.kit.ui_test.emulate_keyboard_press(carb.input.KeyboardInput.DEL)
await self._dock_window()
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_open_file.png")
async def test_all(self):
await self._test_just_opened()
await self._test_load_file()
self._dump_ui_tree(self._win.window)
| 3,359 |
Python
| 34.368421 | 109 | 0.649598 |
omniverse-code/kit/exts/omni.kit.property.geometry/PACKAGE-LICENSES/omni.kit.property.geometry-LICENSE.md
|
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
| 412 |
Markdown
| 57.999992 | 74 | 0.839806 |
omniverse-code/kit/exts/omni.kit.property.geometry/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.2.2"
category = "Internal"
feature = true
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "Geometry Property Widget"
description="View and Edit Geometry Property Values"
# URL of the extension source repository.
repository = ""
# Preview image. Folder named "data" automatically goes in git lfs (see .gitattributes file).
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Keywords for the extension
keywords = ["kit", "usd", "property", "geometry"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
[dependencies]
"omni.usd" = {}
"omni.ui" = {}
"omni.kit.window.property" = {}
"omni.kit.property.usd" = {}
[[python.module]]
name = "omni.kit.property.geometry"
[[test]]
args = [
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--no-window"
]
dependencies = [
"omni.kit.renderer.capture",
"omni.kit.mainwindow",
"omni.kit.ui_test",
"omni.kit.test_suite.helpers"
]
stdoutFailPatterns.exclude = [
"*Failed to acquire interface*while unloading all plugins*"
]
| 1,591 |
TOML
| 25.09836 | 93 | 0.710245 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/__init__.py
|
from .scripts import *
| 23 |
Python
| 10.999995 | 22 | 0.73913 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/geometry_properties.py
|
import os
import carb
import omni.ext
from functools import partial
from pathlib import Path
from pxr import Sdf, Usd, UsdGeom, UsdUI
from typing import Any, Callable
from omni.kit.property.usd.prim_selection_payload import PrimSelectionPayload
_extension_instance = None
TEST_DATA_PATH = ""
def get_instance():
global _extension_instance
return _extension_instance
class GeometryPropertyExtension(omni.ext.IExt):
def __init__(self):
self._registered = False
self._button_menu_entry = []
self._visual_property_widget = None
super().__init__()
def on_startup(self, ext_id):
global _extension_instance
_extension_instance = self
self._register_widget()
manager = omni.kit.app.get_app().get_extension_manager()
extension_path = manager.get_extension_path(ext_id)
global TEST_DATA_PATH
TEST_DATA_PATH = Path(extension_path).joinpath("data").joinpath("tests")
# +add menu item(s)
from omni.kit.property.usd import PrimPathWidget
context_menu = omni.kit.context_menu.get_instance()
if context_menu is None:
carb.log_error("context_menu is disabled!") # pragma: no cover
return None # pragma: no cover
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Instanceable",
show_fn=context_menu.is_prim_selected,
onclick_fn=self._click_toggle_instanceable,
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Wireframe Mode",
name_fn=partial(self._get_primvar_state, prim_name="wireframe", text_name=" Wireframe Mode"),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="wireframe"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Do Not Cast Shadows",
name_fn=partial(
self._get_primvar_state, prim_name="doNotCastShadows", text_name=" Do Not Cast Shadows"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="doNotCastShadows"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Enable Shadow Terminator Fix",
name_fn=partial(
self._get_primvar_state, prim_name="enableShadowTerminatorFix", text_name=" Enable Shadow Terminator Fix"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="enableShadowTerminatorFix"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Enable Fast Refraction Shadow",
name_fn=partial(
self._get_primvar_state, prim_name="enableFastRefractionShadow", text_name=" Enable Fast Refraction Shadow"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="enableFastRefractionShadow"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Disable RT SSS Transmission",
name_fn=partial(
self._get_primvar_state, prim_name="disableRtSssTransmission", text_name=" Disable RT SSS Transmission"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="disableRtSssTransmission"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Multimatted ID:",
name_fn=partial(
self._get_primvar_state, prim_name="multimatte_id", text_name=" ID for multimatte"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="multimatte_id"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Enable Holdout Object",
name_fn=partial(
self._get_primvar_state, prim_name="holdoutObject", text_name=" Enable Holdout Object"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="holdoutObject"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Invisible To Secondary Rays",
name_fn=partial(
self._get_primvar_state, prim_name="invisibleToSecondaryRays", text_name=" Invisible To Secondary Rays"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="invisibleToSecondaryRays"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Is Procedural Volume",
name_fn=partial(
self._get_primvar_state, prim_name="isVolume", text_name=" Is Volume"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="isVolume"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Matte Object",
name_fn=partial(self._get_primvar_state, prim_name="isMatteObject", text_name=" Matte Object"),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="isMatteObject"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Hide From Camera",
name_fn=partial(
self._get_primvar_state, prim_name="hideForCamera", text_name=" Hide From Camera"
),
show_fn=[partial(context_menu.prim_is_type, type=UsdGeom.Boundable)],
onclick_fn=partial(self._click_set_primvar, prim_name="hideForCamera"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Is Light",
name_fn=partial(self._get_primvar_state, prim_name="isLight", text_name=" Is Light"),
show_fn=[partial(context_menu.prim_is_type, type=UsdGeom.Boundable)],
onclick_fn=partial(self._click_set_primvar, prim_name="isLight"),
)
)
def on_shutdown(self): # pragma: no cover
if self._registered:
self._unregister_widget()
# release menu item(s)
from omni.kit.property.usd import PrimPathWidget
for item in self._button_menu_entry:
PrimPathWidget.remove_button_menu_entry(item)
global _extension_instance
_extension_instance = None
def register_custom_visual_attribute(self,
attribute_name: str,
display_name: str,
type_name: str,
default_value: Any,
predicate: Callable[[Any], bool] = None):
"""
        Add a custom attribute with a placeholder.
"""
if self._visual_property_widget:
self._visual_property_widget.add_custom_attribute(
attribute_name,
display_name,
type_name,
default_value,
predicate
)
def deregister_custom_visual_attribute(self, attribute_name: str):
if self._visual_property_widget:
self._visual_property_widget.remove_custom_attribute(attribute_name)
def _register_widget(self):
import omni.kit.window.property as p
from .prim_kind_widget import PrimKindWidget
from .prim_geometry_widget import GeometrySchemaAttributesWidget, ImageableSchemaAttributesWidget
w = p.get_window()
if w:
w.register_widget(
"prim",
"geometry",
GeometrySchemaAttributesWidget(
"Geometry",
UsdGeom.Xformable,
[
UsdGeom.BasisCurves,
UsdGeom.Capsule,
UsdGeom.Cone,
UsdGeom.Cube,
UsdGeom.Cylinder,
UsdGeom.HermiteCurves,
UsdGeom.Mesh,
UsdGeom.NurbsCurves,
UsdGeom.NurbsPatch,
UsdGeom.PointInstancer,
UsdGeom.Points,
UsdGeom.Subset,
UsdGeom.Sphere,
UsdGeom.Xform,
UsdGeom.Gprim,
UsdGeom.PointBased,
UsdGeom.Boundable,
UsdGeom.Curves,
UsdGeom.Imageable,
UsdGeom.PointBased,
UsdGeom.Subset,
UsdGeom.ModelAPI,
UsdGeom.MotionAPI,
UsdGeom.PrimvarsAPI,
UsdGeom.XformCommonAPI,
UsdGeom.ModelAPI,
UsdUI.Backdrop,
UsdUI.NodeGraphNodeAPI,
UsdUI.SceneGraphPrimAPI,
],
[
"proceduralMesh:parameterCheck",
"outputs:parameterCheck",
"refinementEnableOverride",
"refinementLevel",
"primvars:doNotCastShadows",
"primvars:enableShadowTerminatorFix",
"primvars:enableFastRefractionShadow",
"primvars:disableRtSssTransmission",
"primvars:holdoutObject",
"primvars:invisibleToSecondaryRays",
"primvars:isMatteObject",
"primvars:isVolume",
"primvars:multimatte_id",
"primvars:numSplits",
"primvars:endcaps",
UsdGeom.Tokens.proxyPrim,
],
[
"primvars:displayColor",
"primvars:displayOpacity",
"doubleSided",
"purpose",
"visibility",
"xformOpOrder",
],
),
)
self._visual_property_widget = ImageableSchemaAttributesWidget(
"Visual",
UsdGeom.Imageable,
[],
["primvars:displayColor", "primvars:displayOpacity", "doubleSided", "singleSided"],
[]
)
w.register_widget(
"prim",
"geometry_imageable",
self._visual_property_widget,
)
w.register_widget("prim", "kind", PrimKindWidget())
self._registered = True
def _unregister_widget(self): # pragma: no cover
import omni.kit.window.property as p
w = p.get_window()
if w:
w.unregister_widget("prim", "geometry")
w.unregister_widget("prim", "geometry_imageable")
w.unregister_widget("prim", "kind")
self._registered = False
def _click_set_primvar(self, payload: PrimSelectionPayload, prim_name: str):
stage = payload.get_stage()
if not stage:
return
omni.kit.commands.execute("TogglePrimVarCommand", prim_path=payload.get_paths(), prim_name=prim_name)
def _get_primvar_state(self, objects: dict, prim_name: str, text_prefix: str = "", text_name: str = "") -> str:
if not "stage" in objects or not "prim_list" in objects or not objects["stage"]:
return None
stage = objects["stage"]
primvar_state = []
for path in objects["prim_list"]:
prim = stage.GetPrimAtPath(path) if isinstance(path, Sdf.Path) else path
if prim:
primvars_api = UsdGeom.PrimvarsAPI(prim)
is_primvar = primvars_api.GetPrimvar(prim_name)
if is_primvar:
primvar_state.append(is_primvar.Get())
else:
primvar_state.append(False)
if primvar_state == [False] * len(primvar_state):
return f"{text_prefix}Set{text_name}"
elif primvar_state == [True] * len(primvar_state):
return f"{text_prefix}Clear{text_name}"
return f"{text_prefix}Toggle{text_name}"
def _click_toggle_instanceable(self, payload: PrimSelectionPayload):
omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=payload.get_paths())
| 14,059 |
Python
| 41.477341 | 127 | 0.532968 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/prim_geometry_widget.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.ui as ui
import omni.usd
from dataclasses import dataclass, field
from typing import Any, Callable, OrderedDict, List
from omni.kit.property.usd.usd_property_widget import MultiSchemaPropertiesWidget, UsdPropertyUiEntry
from omni.kit.property.usd.usd_property_widget import create_primspec_bool, create_primspec_int
from omni.kit.property.usd.custom_layout_helper import CustomLayoutFrame, CustomLayoutGroup, CustomLayoutProperty
from pxr import Kind, Sdf, Usd, UsdGeom
class GeometrySchemaAttributesWidget(MultiSchemaPropertiesWidget):
def __init__(self, title: str, schema, schema_subclasses: list, include_list: list = [], exclude_list: list = []):
"""
Constructor.
Args:
title (str): Title of the widgets on the Collapsable Frame.
schema: The USD IsA schema or applied API schema to filter attributes.
schema_subclasses (list): list of subclasses
include_list (list): list of additional schema named to add
exclude_list (list): list of additional schema named to remove
"""
super().__init__(title, schema, schema_subclasses, include_list, exclude_list)
# custom attributes
self.add_custom_schema_attribute("primvars:enableFastRefractionShadow", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:doNotCastShadows", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:enableShadowTerminatorFix", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(True))
self.add_custom_schema_attribute("primvars:holdoutObject", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:invisibleToSecondaryRays", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:isMatteObject", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:isVolume", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:multimatte_id", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_int(-1))
self.add_custom_schema_attribute("primvars:disableRtSssTransmission", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:numSplitsOverride", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:numSplits", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_int(2))
self.add_custom_schema_attribute("primvars:endcaps", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_int(1))
self.add_custom_schema_attribute("refinementEnableOverride", self._is_prim_refinement_level_supported, None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("refinementLevel", self._is_prim_refinement_level_supported, None, "", create_primspec_int(0))
def on_new_payload(self, payload):
"""
See PropertyWidget.on_new_payload
"""
self._add_curves = False
self._add_points = False
if not super().on_new_payload(payload):
return False
if not self._payload or len(self._payload) == 0:
return False
used = []
for prim_path in self._payload:
prim = self._get_prim(prim_path)
if not prim or not prim.IsA(self._schema):
return False
used += [attr for attr in prim.GetProperties() if attr.GetName() in self._schema_attr_names and not attr.IsHidden()]
if (prim.IsA(UsdGeom.BasisCurves)):
self._add_curves = True
if (prim.IsA(UsdGeom.Points)):
self._add_points = True
if self.is_custom_schema_attribute_used(prim):
used.append(None)
return used
def _is_prim_refinement_level_supported(self, prim):
return (
prim.IsA(UsdGeom.Mesh)
or prim.IsA(UsdGeom.Cylinder)
or prim.IsA(UsdGeom.Capsule)
or prim.IsA(UsdGeom.Cone)
or prim.IsA(UsdGeom.Sphere)
or prim.IsA(UsdGeom.Cube)
)
def _is_prim_single_sided_supported(self, prim):
return (
prim.IsA(UsdGeom.Mesh)
or prim.IsA(UsdGeom.Cylinder)
or prim.IsA(UsdGeom.Capsule)
or prim.IsA(UsdGeom.Cone)
or prim.IsA(UsdGeom.Sphere)
or prim.IsA(UsdGeom.Cube)
)
def _customize_props_layout(self, attrs):
self.add_custom_schema_attributes_to_props(attrs)
frame = CustomLayoutFrame(hide_extra=False)
with frame:
def update_bounds(stage, prim_paths):
timeline = omni.timeline.get_timeline_interface()
current_time = timeline.get_current_time()
current_time_code = Usd.TimeCode(
omni.usd.get_frame_time_code(current_time, stage.GetTimeCodesPerSecond())
)
for path in prim_paths:
prim = stage.GetPrimAtPath(path)
attr = prim.GetAttribute("extent") if prim else None
if prim and attr:
bounds = UsdGeom.Boundable.ComputeExtentFromPlugins(UsdGeom.Boundable(prim), current_time_code)
attr.Set(bounds)
def build_extent_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import UsdAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
def value_changed_func(model, widget):
val = model.get_value_as_string()
widget.set_tooltip(val)
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = UsdAttributeModel(stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata)
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
kwargs = {
"name": "models_readonly",
"model": model,
"enabled": False,
"tooltip": model.get_value_as_string(),
}
if additional_widget_kwargs:
kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.StringField(**kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
ui.Spacer(width=0)
with ui.VStack(width=8):
ui.Spacer()
ui.Image(
f"{ICON_PATH}/Default value.svg",
width=5.5,
height=5.5,
)
ui.Spacer()
model.add_value_changed_fn(lambda m, w=value_widget: value_changed_func(m,w))
return model
def build_size_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import UsdAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model_kwargs = UsdPropertiesWidgetBuilder._get_attr_value_range_kwargs(metadata)
model = UsdAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata, **model_kwargs
)
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
widget_kwargs = {"model": model}
widget_kwargs.update(UsdPropertiesWidgetBuilder._get_attr_value_soft_range_kwargs(metadata))
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = UsdPropertiesWidgetBuilder._create_drag_or_slider(ui.FloatDrag, ui.FloatSlider, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs)
model.add_value_changed_fn(lambda m, s=stage, p=prim_paths: update_bounds(s, p))
return model
def build_axis_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import TfTokenAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = None
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
tokens = metadata.get("allowedTokens")
if tokens is not None and len(tokens) > 0:
model = TfTokenAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
)
widget_kwargs = {"name": "choices"}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.ComboBox(model, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
else:
model = UsdAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
)
widget_kwargs = {"name": "models"}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.StringField(model, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(
model=model, value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs
)
model.add_item_changed_fn(lambda m, i, s=stage, p=prim_paths: update_bounds(s, p))
return model
def build_endcaps_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import TfTokenAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = None
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
class MyTfTokenAttributeModel(TfTokenAttributeModel):
allowed_tokens = ["open", "flat", "round"]
def _get_allowed_tokens(self, attr):
return self.allowed_tokens
def _get_value_from_index(self, value):
return value
def _update_value(self, force=False):
was_updating_value = self._updating_value
self._updating_value = True
if super(TfTokenAttributeModel, self)._update_value(force):
# TODO don't have to do this every time. Just needed when "allowedTokens" actually changed
self._update_allowed_token()
index = self._value if self._value < len(self._allowed_tokens) else -1
if index != -1 and self._current_index.as_int != index:
self._current_index.set_value(index)
self._item_changed(None)
self._updating_value = was_updating_value
model = MyTfTokenAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
)
widget_kwargs = {"name": "choices"}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.ComboBox(model, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(
model=model, value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs
)
return model
if self._add_curves:
with CustomLayoutGroup("Curve"):
CustomLayoutProperty("curveVertexCounts", "Per curve points")
CustomLayoutProperty("points", "Points")
CustomLayoutProperty("normals", "Normals")
CustomLayoutProperty("widths", "Widths")
CustomLayoutProperty("type", "Type")
CustomLayoutProperty("basis", "Basis")
CustomLayoutProperty("wrap", "Wrap")
CustomLayoutProperty("primvars:numSplitsOverride", "Number of BVH splits Override")
CustomLayoutProperty("primvars:numSplits", "Number of BVH splits")
CustomLayoutProperty("primvars:endcaps", "Endcaps", build_fn=build_endcaps_func)
if self._add_points:
with CustomLayoutGroup("Points"):
CustomLayoutProperty("points", "Points")
CustomLayoutProperty("normals", "Normals")
CustomLayoutProperty("widths", "Widths")
commonSectionName = "Mesh"
if self._add_curves or self._add_points:
commonSectionName = "Common"
with CustomLayoutGroup(commonSectionName):
CustomLayoutProperty("normals", "Normals")
CustomLayoutProperty("orientation", "Orientation")
CustomLayoutProperty("points", "Points")
CustomLayoutProperty("velocities", "Velocities")
CustomLayoutProperty("accelerations", "Accelerations")
CustomLayoutProperty("extent", "Extent", build_fn=build_extent_func)
CustomLayoutProperty("size", "Size", build_fn=build_size_func)
CustomLayoutProperty("radius", "Radius", build_fn=build_size_func)
CustomLayoutProperty("axis", "Axis", build_fn=build_axis_func)
CustomLayoutProperty("height", "Height", build_fn=build_size_func)
CustomLayoutProperty("polymesh:parameterCheck", "Parameter Check")
CustomLayoutProperty("primvars:doNotCastShadows", "Cast Shadows", build_fn=self._inverse_bool_builder)
CustomLayoutProperty("primvars:enableShadowTerminatorFix", "Shadow Terminator Fix")
CustomLayoutProperty("primvars:enableFastRefractionShadow", "Fast Refraction Shadow")
CustomLayoutProperty("primvars:disableRtSssTransmission", "Enable Rt SSS Transmission", build_fn=self._inverse_bool_builder)
CustomLayoutProperty("primvars:holdoutObject", "Holdout Object")
CustomLayoutProperty("primvars:invisibleToSecondaryRays", "Invisible To Secondary Rays")
CustomLayoutProperty("primvars:isMatteObject", "Matte Object")
CustomLayoutProperty("primvars:isVolme", "Is Volume")
CustomLayoutProperty("primvars:multimatte_id", "Multimatte ID")
with CustomLayoutGroup("Face"):
CustomLayoutProperty("faceVertexIndices", "Indices")
CustomLayoutProperty("faceVertexCounts", "Counts")
CustomLayoutProperty("faceVaryingLinearInterpolation", "Linear Interpolation")
CustomLayoutProperty("holeIndices", "Hole Indices")
with CustomLayoutGroup("Refinement"):
CustomLayoutProperty("refinementEnableOverride", "Refinement Override")
CustomLayoutProperty("refinementLevel", "Refinement Level")
CustomLayoutProperty("interpolateBoundary", "Interpolate Boundary")
CustomLayoutProperty("subdivisionScheme", "Subdivision Scheme")
CustomLayoutProperty("triangleSubdivisionRule", "Triangle SubdivisionRule")
with CustomLayoutGroup("Corner"):
CustomLayoutProperty("cornerIndices", "Indices")
CustomLayoutProperty("cornerSharpnesses", "Sharpnesses")
with CustomLayoutGroup("Crease"):
CustomLayoutProperty("creaseIndices", "Indices")
CustomLayoutProperty("creaseLengths", "Lengths")
CustomLayoutProperty("creaseSharpnesses", "Sharpnesses")
return frame.apply(attrs)
def get_additional_kwargs(self, ui_prop: UsdPropertyUiEntry):
"""
Override this function if you want to supply additional arguments when building the label or ui widget.
"""
additional_widget_kwargs = None
if ui_prop.prop_name == "refinementLevel":
additional_widget_kwargs = {"min": 0, "max": 5}
return None, additional_widget_kwargs
def _inverse_bool_builder(self,
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={}
):
import carb.settings
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import UsdAttributeInvertedModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = UsdAttributeInvertedModel(stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata)
settings = carb.settings.get_settings()
left_aligned = settings.get("ext/omni.kit.window.property/checkboxAlignment") == "left"
if not left_aligned:
if not additional_label_kwargs:
additional_label_kwargs = {}
additional_label_kwargs["width"] = 0
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
if not left_aligned:
ui.Spacer(width=10)
ui.Line(style={"color": 0x338A8777}, width=ui.Fraction(1))
ui.Spacer(width=5)
with ui.VStack(width=10):
ui.Spacer()
widget_kwargs = {"width": 10, "height": 0, "name": "greenCheck", "model": model}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
with ui.Placer(offset_x=0, offset_y=-2):
value_widget = ui.CheckBox(**widget_kwargs)
with ui.Placer(offset_x=1, offset_y=-1):
mixed_overlay = ui.Rectangle(
height=8, width=8, name="mixed_overlay", alignment=ui.Alignment.CENTER, visible=False
)
ui.Spacer()
if left_aligned:
ui.Spacer(width=5)
ui.Line(style={"color": 0x338A8777}, width=ui.Fraction(1))
UsdPropertiesWidgetBuilder._create_control_state(value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs)
return model
@dataclass(frozen=True)
class CustomAttributeInfo:
schema_name: str
display_name: str
type_name: str
default_value: Any
predicate: Callable[[Any], bool] = None
def is_supported(self, prim):
return self.predicate is None or self.predicate(prim)
def get_metadata(self):
return {Sdf.PrimSpec.TypeNameKey: self.type_name, "customData": {"default": self.default_value}}
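# Illustrative sketch (not part of the original source): constructing a
# CustomAttributeInfo for a hypothetical bool attribute that is only offered
# on mesh prims. The name "primvars:example" is made up for illustration.
#
#     info = CustomAttributeInfo(
#         schema_name="primvars:example",
#         display_name="Example",
#         type_name="bool",
#         default_value=False,
#         predicate=lambda prim: prim.IsA(UsdGeom.Mesh),
#     )
#     info.is_supported(mesh_prim)  # True only when the predicate passes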
class ImageableSchemaAttributesWidget(MultiSchemaPropertiesWidget):
def __init__(self, title: str, schema, schema_subclasses: list, include_list: list = [], exclude_list: list = []):
"""
Constructor.
Args:
title (str): Title of the widgets on the Collapsable Frame.
schema: The USD IsA schema or applied API schema to filter attributes.
schema_subclasses (list): list of subclasses
include_list (list): list of additional schema names to add
exclude_list (list): list of additional schema names to remove
"""
super().__init__(title, schema, schema_subclasses, include_list, exclude_list)
self._custom_attributes: OrderedDict[str, CustomAttributeInfo] = OrderedDict()
self._custom_placeholders: List[str] = []
# custom attributes
self.add_custom_schema_attribute("singleSided", self._is_prim_single_sided_supported, None, "", create_primspec_bool(False))
def on_new_payload(self, payload):
"""
See PropertyWidget.on_new_payload
"""
self._custom_placeholders.clear()
if not super().on_new_payload(payload):
return False
if not self._payload or len(self._payload) == 0:
return False
used = []
for prim_path in self._payload:
prim = self._get_prim(prim_path)
if not prim or not prim.IsA(self._schema):
return False
used += [attr for attr in prim.GetProperties() if attr.GetName() in self._schema_attr_names and not attr.IsHidden()]
for schema_name, attr_info in self._custom_attributes.items():
if attr_info.is_supported(prim) and not prim.GetAttribute(schema_name):
self._custom_placeholders.append(schema_name)
used.append(None)
if self.is_custom_schema_attribute_used(prim):
used.append(None)
return used
def add_custom_attribute(self,
attribute_name,
display_name,
type_name="bool",
default_value=False,
predicate: Callable[[Any], bool] = None):
"""
Add custom attribute with placeholder.
"""
self._schema_attr_base.add(attribute_name)
self._custom_attributes.update(
{attribute_name: CustomAttributeInfo(attribute_name, display_name, type_name, default_value, predicate)}
)
self.request_rebuild()
def remove_custom_attribute(self, attribute_name):
self._schema_attr_base.remove(attribute_name)
del self._custom_attributes[attribute_name]
self.request_rebuild()
def _is_prim_single_sided_supported(self, prim):
return (
prim.IsA(UsdGeom.Mesh)
or prim.IsA(UsdGeom.Cylinder)
or prim.IsA(UsdGeom.Capsule)
or prim.IsA(UsdGeom.Cone)
or prim.IsA(UsdGeom.Sphere)
or prim.IsA(UsdGeom.Cube)
)
def _customize_props_layout(self, attrs):
self.add_custom_schema_attributes_to_props(attrs)
for schema_name, attr_info in self._custom_attributes.items():
if schema_name in self._custom_placeholders:
attrs.append(
UsdPropertyUiEntry(
schema_name,
"",
attr_info.get_metadata(),
Usd.Attribute,
)
)
frame = CustomLayoutFrame(hide_extra=True)
with frame:
for schema_name, attr_info in self._custom_attributes.items():
CustomLayoutProperty(schema_name, attr_info.display_name)
# OMFP-1917: Most Visual settings under the Property tab don't work
# Hiding doubleSided, singleSided, primvars:displayColor, primvars:displayOpacity
CustomLayoutProperty("doubleSided", "Double Sided", hide_if_true=True)
CustomLayoutProperty("singleSided", "Single Sided", hide_if_true=True)
CustomLayoutProperty("purpose", "Purpose")
CustomLayoutProperty("visibility", "Visibility")
CustomLayoutProperty("primvars:displayColor", "Display Color")
CustomLayoutProperty("primvars:displayOpacity", "Display Opacity")
return frame.apply(attrs)
| 28,148 |
Python
| 49.627698 | 160 | 0.574996 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/__init__.py
|
from .geometry_properties import *
from .geometry_commands import *
| 68 |
Python
| 21.999993 | 34 | 0.794118 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/geometry_commands.py
|
import carb
import omni.kit.commands
from typing import List, Optional, Any
from pxr import Usd, Sdf, UsdGeom
class PrimVarCommand(omni.kit.commands.Command):
"""
Set primvar undoable **Command**.
Args:
prim_path (list): List of paths of prims.
prim_name (str): Primvar name.
prim_type (Sdf.ValueTypeName): Primvar value type (e.g. Sdf.ValueTypeNames.Bool)
value (Any): New primvar value. If the primvar doesn't exist, it will be created
"""
def __init__(
self,
prim_path: List[str],
prim_name: str,
prim_type: str,
value: Any,
usd_context_name: Optional[str] = "",
):
self._prim_path = prim_path
self._prim_name = prim_name
self._prim_type = prim_type
self._value = value
self._usd_context = omni.usd.get_context(usd_context_name)
self._undo_values = {}
def do(self):
stage = self._usd_context.get_stage()
for path in self._prim_path:
if path:
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
if value:
if value.GetTypeName() != self._prim_type:
carb.log_error(f"PrimVarCommand: cannot set value as {path}.{self._prim_name} is type {value.GetTypeName()} and expected type is {self._prim_type}")
else:
self._undo_values[str(path)] = value.Get()
value.Set(self._value)
else:
self._undo_values[str(path)] = None
primvars_api.CreatePrimvar(self._prim_name, self._prim_type).Set(self._value)
def undo(self):
stage = self._usd_context.get_stage()
for path in self._undo_values.keys():
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
orig_value = self._undo_values[path]
if orig_value:
value.Set(orig_value)
else:
primvars_api.RemovePrimvar(self._prim_name)
self._undo_values = {}
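# Illustrative usage (a sketch mirroring this extension's own tests; the
# prim path "/World/Cube" is hypothetical):
#
#     import omni.kit.commands
#     from pxr import Sdf
#
#     omni.kit.commands.execute(
#         "PrimVarCommand",
#         prim_path=["/World/Cube"],
#         prim_name="test_int",
#         prim_type=Sdf.ValueTypeNames.Int,
#         value=123456,
#     )
#     omni.kit.undo.undo()  # restores the previous value, or removes the primvar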
class TogglePrimVarCommand(omni.kit.commands.Command):
"""
Toggle primvar undoable **Command**.
Args:
prim_path (list): List of paths of prims.
prim_name (str): Primvar name.
"""
def __init__(
self,
prim_path: List[str],
prim_name: str,
usd_context_name: Optional[str] = "",
):
self._prim_path = prim_path
self._prim_name = prim_name
self._usd_context = omni.usd.get_context(usd_context_name)
self._undo_values = {}
def do(self):
stage = self._usd_context.get_stage()
for path in self._prim_path:
if path:
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
if value:
if value.GetTypeName() != Sdf.ValueTypeNames.Bool:
carb.log_error(f"TogglePrimVarCommand: cannot set value as {value.GetTypeName()} isn't a {self._prim_type}")
else:
self._undo_values[str(path)] = value.Get()
value.Set(not value.Get())
else:
self._undo_values[str(path)] = None
primvars_api.CreatePrimvar(self._prim_name, Sdf.ValueTypeNames.Bool).Set(True)
def undo(self):
stage = self._usd_context.get_stage()
for path in self._undo_values.keys():
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
orig_value = self._undo_values[path]
if orig_value:
value.Set(orig_value)
else:
primvars_api.RemovePrimvar(self._prim_name)
self._undo_values = {}
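# Illustrative usage (sketch; "/World/Cube" is a hypothetical prim path).
# The first call creates "primvars:test_bool" as True; each further call
# flips it:
#
#     omni.kit.commands.execute(
#         "TogglePrimVarCommand",
#         prim_path=["/World/Cube"],
#         prim_name="test_bool",
#     )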
class ToggleInstanceableCommand(omni.kit.commands.Command):
"""
Toggle instanceable undoable **Command**.
Args:
prim_path (list): List of paths of prims.
"""
def __init__(
self,
prim_path: List[str],
usd_context_name: Optional[str] = "",
):
self._prim_path = prim_path
self._usd_context = omni.usd.get_context(usd_context_name)
self._undo_values = {}
def do(self):
stage = self._usd_context.get_stage()
for path in self._prim_path:
if path:
prim = stage.GetPrimAtPath(path)
value = prim.IsInstanceable()
self._undo_values[str(path)] = value
prim.SetInstanceable(not value)
def undo(self):
stage = self._usd_context.get_stage()
for path in self._undo_values.keys():
prim = stage.GetPrimAtPath(path)
value = self._undo_values[path]
prim.SetInstanceable(value)
self._undo_values = {}
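# Illustrative usage (sketch; "/World/Cube" is a hypothetical prim path):
#
#     omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=["/World/Cube"])
#     omni.kit.undo.undo()  # restores the previous instanceable state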
| 5,046 |
Python
| 33.101351 | 173 | 0.550139 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/prim_kind_widget.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import omni.ui as ui
import omni.usd
import carb
from omni.kit.window.property.templates import SimplePropertyWidget, LABEL_WIDTH, LABEL_HEIGHT, HORIZONTAL_SPACING
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder, UsdPropertiesWidget
from omni.kit.property.usd.usd_object_model import MetadataObjectModel
from pxr import Kind, Usd, UsdGeom
class Constant:
def __setattr__(self, name, value):
raise Exception(f"Can't change Constant.{name}") # pragma: no cover
FONT_SIZE = 14.0
MIXED = "Mixed"
MIXED_COLOR = 0xFFCC9E61
class PrimKindWidget(UsdPropertiesWidget):
def __init__(self):
super().__init__(title="Kind", collapsed=False)
self._metadata_model = None
def on_new_payload(self, payload):
"""
See PropertyWidget.on_new_payload
"""
if not super().on_new_payload(payload): # pragma: no cover
return False # pragma: no cover
if len(self._payload) == 0:
return False
for prim_path in self._payload: # pragma: no cover
prim = self._get_prim(prim_path) # pragma: no cover
if not prim or not prim.IsA(UsdGeom.Imageable): # pragma: no cover
return False
return True
def reset(self):
super().reset()
if self._metadata_model:
self._metadata_model.clean()
self._metadata_model = None
def build_items(self):
super().build_items()
# get Kinds
all_kinds = Kind.Registry.GetAllKinds()
all_kinds.insert(0, "")
# http://graphics.pixar.com/usd/docs/USD-Glossary.html#USDGlossary-Kind
# "model" is considered an abstract type and should not be assigned as any prim's kind.
all_kinds.remove(Kind.Tokens.model)
kind = None
ambiguous = False
stage = self._payload.get_stage()
for path in self._payload:
prim = stage.GetPrimAtPath(path)
if prim:
prim_kind = Usd.ModelAPI(prim).GetKind()
if kind is None:
kind = prim_kind
elif kind != prim_kind:
kind = "mixed"
if prim_kind not in all_kinds: # pragma: no cover
all_kinds.append(prim_kind) # pragma: no cover
carb.log_verbose(f"{path} has invalid Kind:{prim_kind}") # pragma: no cover
if kind is None: # pragma: no cover
return # pragma: no cover
if self._filter.matches("Kind"):
self._any_item_visible = True
highlight = self._filter.name
with ui.HStack(spacing=HORIZONTAL_SPACING):
UsdPropertiesWidgetBuilder._create_label("Kind", {}, {"highlight": highlight})
with ui.ZStack():
self._metadata_model = MetadataObjectModel(
stage, [path for path in self._payload], False, {}, key="kind", default="", options=all_kinds
)
value_widget = ui.ComboBox(self._metadata_model, name="choices")
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(self._metadata_model, value_widget, mixed_overlay)
def _get_shared_properties_from_selected_prims(self, anchor_prim):
return None
def _get_prim(self, prim_path):
if prim_path:
stage = self._payload.get_stage()
if stage:
return stage.GetPrimAtPath(prim_path)
return None # pragma: no cover
| 4,198 |
Python
| 37.172727 | 117 | 0.596951 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_path_toggle.py
|
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.test
import os
import omni.kit.app
import omni.usd
from omni.kit.test.async_unittest import AsyncTestCase
from omni.kit import ui_test
from pxr import Gf
from omni.kit.test_suite.helpers import open_stage, get_test_data_path, select_prims, wait_stage_loading, arrange_windows
class PropertyPathAddMenu(AsyncTestCase):
# Before running each test
async def setUp(self):
await arrange_windows("Stage", 64)
await open_stage(get_test_data_path(__name__, "geometry_test.usda"))
# After running each test
async def tearDown(self):
await wait_stage_loading()
async def test_property_path_rendering(self):
await ui_test.find("Property").focus()
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
# select cube
await select_prims(["/World/Cube"])
await ui_test.human_delay()
# verify not set
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("primvars:wireframe")
self.assertFalse(attr.IsValid())
# click "Add"
add_widget = [w for w in ui_test.find_all("Property//Frame/**/Button[*].identifier==''") if w.widget.text.endswith("Add")][0]
await add_widget.click()
# select wireframe
await ui_test.select_context_menu("Rendering/Set Wireframe Mode")
# verify set
self.assertTrue(attr.IsValid())
self.assertTrue(attr.Get())
# undo
omni.kit.undo.undo()
# verify not set
self.assertFalse(attr.IsValid())
| 2,006 |
Python
| 33.016949 | 133 | 0.678465 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/__init__.py
|
from .test_geometry import *
from .test_commands import *
from .test_path_toggle import *
| 90 |
Python
| 21.749995 | 31 | 0.755556 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_commands.py
|
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import pathlib
import omni.kit.app
import omni.kit.commands
import omni.kit.test
import omni.ui as ui
from omni.ui.tests.test_base import OmniUiTest
from omni.kit.test_suite.helpers import open_stage, get_test_data_path
from omni.kit import ui_test
from pxr import Sdf
class TestCommandWidget(OmniUiTest):
# Before running each test
async def setUp(self):
await super().setUp()
await open_stage(get_test_data_path(__name__, "geometry_test.usda"))
# After running each test
async def tearDown(self):
await super().tearDown()
async def test_command_prim_var(self):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute('primvars:test_int')
self.assertFalse(attr.IsValid())
# create primvar as int
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Int, value=123456)
attr = prim.GetAttribute('primvars:test_int')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), 123456)
# try and change using bool
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Bool, value=True)
attr = prim.GetAttribute('primvars:test_int')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), 123456)
# change primvar
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Int, value=654321)
attr = prim.GetAttribute('primvars:test_int')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), 654321)
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo removed primvar
attr = prim.GetAttribute('primvars:test_int')
self.assertFalse(attr.IsValid())
# create primvar as bool
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Bool, value=True)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# try and change using int
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Int, value=123456)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# change primvar
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Bool, value=False)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), False)
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo removed primvar
attr = prim.GetAttribute('primvars:test_bool')
self.assertFalse(attr.IsValid())
async def test_command_toggle_prim_var(self):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute('primvars:test_bool')
self.assertFalse(attr.IsValid())
# create primvar as bool
omni.kit.commands.execute("TogglePrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool")
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# try and change using int
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Int, value=123456)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# change primvar
omni.kit.commands.execute("TogglePrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool")
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), False)
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo removed primvar
attr = prim.GetAttribute('primvars:test_bool')
self.assertFalse(attr.IsValid())
async def test_command_toggle_instanceable(self):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
self.assertFalse(prim.IsInstanceable())
# toggle instanceable
omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=["/World/Cube"])
self.assertTrue(prim.IsInstanceable())
# toggle instanceable
omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=["/World/Cube"])
self.assertFalse(prim.IsInstanceable())
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo
self.assertFalse(prim.IsInstanceable())
| 5,718 |
Python
| 38.171233 | 149 | 0.659671 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_geometry.py
|
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.app
import omni.kit.commands
import omni.kit.test
import omni.ui as ui
from omni.ui.tests.test_base import OmniUiTest
from omni.kit import ui_test
from pxr import Kind, Sdf, Gf, UsdGeom
from omni.kit.property.geometry import geometry_properties
import pathlib
class TestGeometryWidget(OmniUiTest):
# Before running each test
async def setUp(self):
await super().setUp()
from omni.kit.property.geometry.scripts.geometry_properties import TEST_DATA_PATH
self._golden_img_dir = TEST_DATA_PATH.absolute().joinpath("golden_img").absolute()
self._usd_path = TEST_DATA_PATH.absolute()
from omni.kit.property.usd.usd_attribute_widget import UsdPropertiesWidget
import omni.kit.window.property as p
self._w = p.get_window()
# After running each test
async def tearDown(self):
await super().tearDown()
async def test_geometry_ui(self):
usd_context = omni.usd.get_context()
await self.docked_test_window(
window=self._w._window,
width=450,
height=700,
restore_window = ui.Workspace.get_window("Layer") or ui.Workspace.get_window("Stage"),
restore_position = ui.DockPosition.BOTTOM)
test_file_path = self._usd_path.joinpath("geometry_test.usda").absolute()
await usd_context.open_stage_async(str(test_file_path))
await omni.kit.app.get_app().next_update_async()
# Select the prim.
usd_context.get_selection().set_selected_prim_paths(["/World/Cube"], True)
# Need to wait for an additional frames for omni.ui rebuild to take effect
await ui_test.human_delay(10)
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_geometry_ui.png")
async def test_geometry_mixed_ui(self):
usd_context = omni.usd.get_context()
await self.docked_test_window(
window=self._w._window,
width=450,
height=700,
restore_window = ui.Workspace.get_window("Layer") or ui.Workspace.get_window("Stage"),
restore_position = ui.DockPosition.BOTTOM)
test_file_path = self._usd_path.joinpath("geometry_test.usda").absolute()
await usd_context.open_stage_async(str(test_file_path))
await omni.kit.app.get_app().next_update_async()
# Select the prim.
usd_context.get_selection().set_selected_prim_paths(["/World/Cube", "/World/Looks"], True)
# Need to wait for an additional frames for omni.ui rebuild to take effect
await ui_test.human_delay(10)
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_geometry_mixed_ui.png")
async def test_custom_visual_attribute_ui(self):
usd_context = omni.usd.get_context()
await self.docked_test_window(
window=self._w._window,
width=450,
height=800,
restore_window = ui.Workspace.get_window("Layer") or ui.Workspace.get_window("Stage"),
restore_position = ui.DockPosition.BOTTOM)
test_file_path = self._usd_path.joinpath("geometry_test.usda").absolute()
await usd_context.open_stage_async(str(test_file_path))
await omni.kit.app.get_app().next_update_async()
inst = geometry_properties.get_instance()
self.assertIsNotNone(inst)
def is_cube(prim):
return prim.IsA(UsdGeom.Cube)
inst.register_custom_visual_attribute("foo", "Foo", "bool", False, is_cube)
inst.register_custom_visual_attribute("bar", "Bar", "int", 1234, is_cube)
# Select the prim.
usd_context.get_selection().set_selected_prim_paths(["/World/Cube"], True)
# Need to wait for an additional frames for omni.ui rebuild to take effect
await ui_test.human_delay(10)
await omni.kit.app.get_app().next_update_async()
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_geometry_ui_custom.png")
# Clean up
inst.deregister_custom_visual_attribute("foo")
inst.deregister_custom_visual_attribute("bar")
await ui_test.human_delay(10)
await omni.kit.app.get_app().next_update_async()
| 4,713 |
Python
| 38.283333 | 116 | 0.664333 |
omniverse-code/kit/exts/omni.kit.property.geometry/docs/CHANGELOG.md
|
# Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.2.2] - 2022-09-27
### Changes
- Changed primvars:numSplits* text
## [1.2.1] - 2022-05-13
### Changes
- Cleaned up ImageWithProvider vs Image usage
## [1.2.0] - 2021-05-31
### Added
- Added extent regeneration on size/radius/axis changes
## [1.1.0] - 2021-03-19
### Added
- Added soft range [0, 5] for refinementLevel.
## [1.0.7] - 2021-02-19
### Changes
- Added UI test
## [1.0.6] - 2020-12-09
### Changes
- Added extension icon
- Added readme
- Updated preview image
## [1.0.5] - 2020-11-20
### Changes
- Silenced unknown kind warning
## [1.0.4] - 2020-11-06
### Changes
- Update Kind to use metadata model
## [1.0.3] - 2020-10-27
### Changes
- Fixed spacing on kind widget
## [1.0.2] - 2020-10-22
### Changes
- Improved layout
## [1.0.1] - 2020-10-22
### Changes
- Moved schema into bundle
## [1.0.0] - 2020-10-05
### Changes
- Created
| 950 |
Markdown
| 16.611111 | 80 | 0.636842 |
omniverse-code/kit/exts/omni.kit.property.geometry/docs/README.md
|
# omni.kit.property.geometry
## Introduction
Property window extensions are for viewing and editing Usd Prim Attributes
## This extension supports editing of these Usd Types;
- UsdGeom.BasisCurves
- UsdGeom.Capsule
- UsdGeom.Cone
- UsdGeom.Cube
- UsdGeom.Cylinder
- UsdGeom.HermiteCurves
- UsdGeom.Mesh
- UsdGeom.NurbsCurves
- UsdGeom.NurbsPatch
- UsdGeom.PointInstancer
- UsdGeom.Points
- UsdGeom.Subset
- UsdGeom.Sphere
- UsdGeom.Xform
- UsdGeom.Gprim
- UsdGeom.PointBased
- UsdGeom.Boundable
- UsdGeom.Curves
- UsdGeom.Imageable
- UsdUI.Backdrop
### and supports editing of these Usd APIs;
- UsdGeom.ModelAPI
- UsdGeom.MotionAPI
- UsdGeom.PrimvarsAPI
- UsdGeom.XformCommonAPI
- UsdUI.NodeGraphNodeAPI
- UsdUI.SceneGraphPrimAPI
| 777 |
Markdown
| 17.975609 | 74 | 0.788932 |
omniverse-code/kit/exts/omni.kit.property.geometry/docs/index.rst
|
omni.kit.property.geometry
###########################
Property Geometry Values
.. toctree::
:maxdepth: 1
CHANGELOG
| 127 |
reStructuredText
| 9.666666 | 27 | 0.551181 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/style.py
|
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("data").joinpath("icons")
UI_STYLE = {"Menu.Item.Icon::Display": {"image_url": f"{ICON_PATH}/viewport_visibility.svg"}}
| 253 |
Python
| 35.285709 | 94 | 0.727273 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/extension.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ViewportDisplayMenuBarExtension", "get_instance"]
from typing import Union
from omni.kit.viewport.menubar.core import BaseCategoryItem
from .display_menu_container import DEFAULT_SECTION, DisplayMenuContainer
import omni.ext
_extension_instance = None
def get_instance():
global _extension_instance
return _extension_instance
class ViewportDisplayMenuBarExtension(omni.ext.IExt):
"""The Entry Point for the Display Settings in Viewport Menu Bar"""
def on_startup(self, ext_id):
self._display_menu = DisplayMenuContainer()
global _extension_instance
_extension_instance = self
def on_shutdown(self):
self._display_menu.destroy()
self._display_menu = None
global _extension_instance
_extension_instance = None
def register_custom_setting(self, text: str, setting_path: str):
"""
Register custom display setting.
Args:
text (str): Text shown in menu item.
setting_path (str): Setting path for custom display setting (bool value).
"""
if self._display_menu:
self._display_menu.register_custom_setting(text, setting_path)
def deregister_custom_setting(self, text: str):
"""
Deregister custom display setting.
Args:
text (str): Text shown in menu item.
"""
if self._display_menu:
self._display_menu.deregister_custom_setting(text)
def register_custom_category_item(self, category: str, item: BaseCategoryItem, section: str = DEFAULT_SECTION):
"""
Register custom display setting in category.
Args:
category (str): Category to add menu item. Can be an existing category e.g. "Heads Up Display" or a new one.
item (BaseCategoryItem): Item to append.
section (str): Optional section to organise category, default no section.
"""
if self._display_menu:
self._display_menu.register_custom_category_item(category, item, section)
def deregister_custom_category_item(self, category: str, item: BaseCategoryItem):
"""
Deregister custom display setting in category.
Args:
category (str): Category to remove menu item. Can be an existing category e.g. "Heads Up Display" or a new one.
item (BaseCategoryItem): Item to remove.
"""
if self._display_menu:
self._display_menu.deregister_custom_category_item(category, item)
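# Illustrative usage (a sketch mirroring this extension's tests; the category
# item below is hypothetical):
#
#     import omni.ui as ui
#     import omni.kit.viewport.menubar.display as display
#     from omni.kit.viewport.menubar.core import CategoryCollectionItem, CategoryStateItem
#
#     inst = display.get_instance()
#     item = CategoryCollectionItem(
#         "Custom category",
#         [CategoryStateItem("Custom Item", ui.SimpleBoolModel(True))],
#     )
#     inst.register_custom_category_item("Show By Type", item)
#     # ... later, on shutdown:
#     inst.deregister_custom_category_item("Show By Type", item)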
| 2,971 |
Python
| 36.15 | 123 | 0.672164 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/model.py
|
import omni.ui as ui
class DisplayLayerModel(ui.SimpleBoolModel):
def __init__(self, layer) -> None:
self._layer = layer
super().__init__()
def get_value_as_bool(self) -> bool:
return self._layer.visible
def set_value(self, visible: bool):
if visible != self._layer.visible:
self._layer.visible = visible
self._value_changed()
def begin_edit(self) -> None:
pass
def end_edit(self) -> None:
pass
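# Minimal sketch (not part of the original module): any object exposing a
# writable `visible` attribute satisfies the interface this model expects.
#
#     class _FakeLayer:
#         visible = True
#
#     model = DisplayLayerModel(_FakeLayer())
#     model.set_value(False)       # hides the layer and notifies subscribers
#     model.get_value_as_bool()    # -> False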
| 493 |
Python
| 21.454544 | 44 | 0.56998 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/display_menu_container.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["DisplayMenuContainer"]
from omni.kit.viewport.menubar.core import (
IconMenuDelegate,
SettingModel,
ViewportMenuContainer,
CategoryMenuContainer,
SelectableMenuItem,
SimpleCategoryModel,
CategoryStateItem,
BaseCategoryItem,
CategoryCustomItem,
CategoryCollectionItem
)
from .style import UI_STYLE
import carb
import carb.settings
import omni.ui as ui
import omni.kit.app
import omni.usd
from functools import partial
from typing import Dict, List, Optional, Tuple
SHOW_BY_TYPE_EXCLUDE_LIST = "/exts/omni.kit.viewport.menubar.display/showByType/exclude_list"
HEADS_UP_CATEGORY_NAME = "Heads Up Display"
SHOW_BY_TYPE_CATEGORY_NAME = "Show By Type"
SHOW_BY_PURPOSE_CATEGORY_NAME = "Show By Purpose"
DEFAULT_CATEGORIES = [HEADS_UP_CATEGORY_NAME, SHOW_BY_TYPE_CATEGORY_NAME, SHOW_BY_PURPOSE_CATEGORY_NAME]
DEFAULT_SECTION = "default"
def _make_viewport_setting(viewport_api_id: str, setting: str):
return f"/persistent/app/viewport/{viewport_api_id}/{setting}/visible"
class DisplayMenuContainer(ViewportMenuContainer):
"""The menu with the visibility settings"""
def __init__(self):
super().__init__(
name="Display",
delegate=IconMenuDelegate("Display"),
visible_setting_path="/exts/omni.kit.viewport.menubar.display/visible",
order_setting_path="/exts/omni.kit.viewport.menubar.display/order",
style=UI_STYLE
)
self._root_menu: Optional[ui.Menu] = None
self._category_models: Dict[str, SimpleCategoryModel] = {}
self._custom_settings: List[Tuple[str, str]] = []
self._custom_category_items: Dict[str, List[BaseCategoryItem]] = {}
self._section_categories: Dict[str, List[str]] = {}
self._section_categories[DEFAULT_SECTION] = DEFAULT_CATEGORIES[:] # Copy the default categories list
def destroy(self):
super().destroy()
def register_custom_setting(self, text: str, setting_path: str):
self._custom_settings.append((text, setting_path))
if self._root_menu:
self._root_menu.invalidate()
def deregister_custom_setting(self, text: str):
found = [item for item in self._custom_settings if item[0] == text]
if found:
for item in found:
self._custom_settings.remove(item)
if self._root_menu:
self._root_menu.invalidate()
def register_custom_category_item(self, category: str, item: BaseCategoryItem, section: str):
is_top_category = False
if category not in DEFAULT_CATEGORIES and category not in self._category_models:
if item.text == category and isinstance(item, CategoryCollectionItem):
self._category_models[category] = SimpleCategoryModel(category, root=item)
is_top_category = True
else:
self._category_models[category] = SimpleCategoryModel(category)
if category not in self._custom_category_items:
self._custom_category_items[category] = []
if section not in self._section_categories:
self._section_categories[section] = []
if not is_top_category:
self._custom_category_items[category].append(item)
if category not in self._section_categories[section]:
self._section_categories[section].append(category)
if self._root_menu:
self._root_menu.invalidate()
def deregister_custom_category_item(self, category: str, item: BaseCategoryItem):
if category in self._custom_category_items:
if item in self._custom_category_items[category]:
self._custom_category_items[category].remove(item)
if category not in DEFAULT_CATEGORIES:
if (item.text == category and isinstance(item, CategoryCollectionItem)) or len(self._custom_category_items[category]) == 0:
del self._category_models[category]
# Now clean up section
sections = list(self._section_categories.keys())
for section in sections:
if category in self._section_categories[section]:
self._section_categories[section].remove(category)
if len(self._section_categories[section]) == 0:
del self._section_categories[section]
if self._root_menu:
self._root_menu.invalidate()
def build_fn(self, viewport_context: dict):
self._root_menu = ui.Menu(self.name, delegate=self._delegate,
on_build_fn=partial(self._build_menu_items, viewport_context),
style=self._style)
def _build_menu_items(self, viewport_context: dict, *args, **kwargs):
viewport = viewport_context.get("viewport_api")
viewport_api_id: str = str(viewport.id)
settings = carb.settings.get_settings()
show_by_type_items: list[BaseCategoryItem] = [
CategoryStateItem("Cameras", setting_path=_make_viewport_setting(viewport_api_id, "scene/cameras")),
CategoryStateItem("Lights", setting_path=_make_viewport_setting(viewport_api_id, "scene/lights")),
CategoryStateItem("Skeletons", setting_path=_make_viewport_setting(viewport_api_id, "scene/skeletons")),
CategoryStateItem("Audio", setting_path=_make_viewport_setting(viewport_api_id, "scene/audio")),
]
if (exclude_list := settings.get(SHOW_BY_TYPE_EXCLUDE_LIST)):
show_by_type_items = [item for item in show_by_type_items if item.text not in exclude_list]
# 105.1: Support alternate label of memory (i.e. "Host Memory", "Process Memory", "Memory")
# Defaults to pre 105.1 label (Host Memory) when not specified
mem_label = settings.get("/exts/omni.kit.viewport.window/hud/hostMemory/label")
if mem_label is None:
mem_label = "Host"
default_category_models = {
HEADS_UP_CATEGORY_NAME: SimpleCategoryModel(
HEADS_UP_CATEGORY_NAME,
[
CategoryStateItem("FPS", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderFPS")),
CategoryStateItem("Device Memory", setting_path=_make_viewport_setting(viewport_api_id, "hud/deviceMemory")),
CategoryStateItem(f"{mem_label} Memory", setting_path=_make_viewport_setting(viewport_api_id, "hud/hostMemory")),
CategoryStateItem("Resolution", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderResolution")),
CategoryStateItem("Progress", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderProgress")),
]
),
SHOW_BY_TYPE_CATEGORY_NAME: SimpleCategoryModel(
SHOW_BY_TYPE_CATEGORY_NAME,
show_by_type_items
),
SHOW_BY_PURPOSE_CATEGORY_NAME: SimpleCategoryModel(
SHOW_BY_PURPOSE_CATEGORY_NAME,
[
CategoryStateItem("Guide", setting_path="/persistent/app/hydra/displayPurpose/guide"),
CategoryStateItem("Proxy", setting_path="/persistent/app/hydra/displayPurpose/proxy"),
CategoryStateItem("Render", setting_path="/persistent/app/hydra/displayPurpose/render"),
]
)
}
self._category_models.update(default_category_models)
# XXX: These add_item calls currently must occur to add the separator!
self._category_models[SHOW_BY_TYPE_CATEGORY_NAME].add_item(CategoryCustomItem(
"Meshes",
lambda: SelectableMenuItem("Meshes",
SettingModel(setting_path=_make_viewport_setting(viewport_api_id, "scene/meshes")))
))
self._category_models[HEADS_UP_CATEGORY_NAME].add_item(CategoryCustomItem(
"Camera Speed",
lambda: SelectableMenuItem("Camera Speed",
SettingModel(_make_viewport_setting(viewport_api_id, "hud/cameraSpeed")))
))
identifier = "omni.kit.viewport.menubar.display"
# Create default section categories first
for name in self._section_categories[DEFAULT_SECTION]:
model = self._category_models[name]
if name in self._custom_category_items:
for item in self._custom_category_items[name]:
model.add_item(item)
# XXX: Workaround nested creation of these items not being able to trigger an action!
trigger_fns = None
if name == SHOW_BY_TYPE_CATEGORY_NAME:
icon_click_id = f"{identifier}.{name}.{name}" # Left-most check/mixed icon was toggled
trigger_fns = {
"Cameras": partial(self.__trigger_action, "toggle_camera_visibility", viewport_api=viewport),
"Lights": partial(self.__trigger_action, "toggle_light_visibility", viewport_api=viewport),
"Skeletons": partial(self.__trigger_action, "toggle_skeleton_visibility", viewport_api=viewport),
"Audio": partial(self.__trigger_action, "toggle_audio_visibility", viewport_api=viewport),
"Meshes": partial(self.__trigger_action, "toggle_mesh_visibility", viewport_api=viewport),
icon_click_id: partial(self.__trigger_action, "toggle_show_by_type_visibility", viewport_api=viewport),
}
CategoryMenuContainer(model, identifier=f"{identifier}.{name}", trigger_fns=trigger_fns)
# Now iterate named sections, with a separator for each.
for section, categories in self._section_categories.items():
if section == DEFAULT_SECTION:
continue
ui.Separator(text=section)
for name in categories:
model = self._category_models[name]
if name in self._custom_category_items:
for item in self._custom_category_items[name]:
model.add_item(item)
CategoryMenuContainer(model, identifier=f"{identifier}.{name}")
ui.Separator()
# This currently is just easier tied to legacy global setting
SelectableMenuItem("Selection Outline", SettingModel(_make_viewport_setting(viewport_api_id, "guide/selection")),
triggered_fn=partial(self.__trigger_action, "toggle_selection_hilight_visibility", viewport_api=viewport),
trigger_will_set_model=True
)
SelectableMenuItem("Axis", SettingModel(_make_viewport_setting(viewport_api_id, "guide/axis")),
triggered_fn=partial(self.__trigger_action, "toggle_axis_visibility", viewport_api=viewport),
trigger_will_set_model=True
)
SelectableMenuItem("Grid", SettingModel(_make_viewport_setting(viewport_api_id, "guide/grid")),
triggered_fn=partial(self.__trigger_action, "toggle_grid_visibility", viewport_api=viewport),
trigger_will_set_model=True
)
# Custom display settings
if self._custom_settings:
ui.Separator()
for (text, setting_path) in self._custom_settings:
SelectableMenuItem(text, SettingModel(setting_path))
def __trigger_action(self, action: str, *args, **kwargs):
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
if action_registry:
exc_action = action_registry.get_action("omni.kit.viewport.actions", action)
if exc_action:
exc_action.execute(*args, **kwargs)
else:
carb.log_error(f"Could not find action to run: '{action}'")
else:
carb.log_error(f"Could not get action_registry to run '{action}")
| 12,371 |
Python
| 47.140078 | 135 | 0.629941 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/tests/__init__.py
|
from .test_ui import *
| 23 |
Python
| 10.999995 | 22 | 0.695652 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/tests/test_ui.py
|
import omni.kit.test
from re import I
from omni.ui.tests.test_base import OmniUiTest
import omni.kit.ui_test as ui_test
from omni.kit.ui_test import Vec2
import omni.usd
import omni.kit.app
from pathlib import Path
import carb.input
import asyncio
import omni.ui as ui
from omni.kit.viewport.menubar.core import CategoryCollectionItem, CategoryStateItem, CategoryCustomItem, ViewportMenuDelegate, SelectableMenuItem
CURRENT_PATH = Path(__file__).parent
TEST_DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.parent.joinpath("data").joinpath("tests")
TEST_WIDTH, TEST_HEIGHT = 600, 400
TEST_SETTING_TRUE = "/exts/test/setting/true"
TEST_SETTING_FALSE = "/exts/test/setting/false"
class TestSettingMenuWindow(OmniUiTest):
async def setUp(self):
self._golden_img_dir = TEST_DATA_PATH.absolute().joinpath("golden_img").absolute()
await self.create_test_area(width=TEST_WIDTH, height=TEST_HEIGHT)
await omni.kit.app.get_app().next_update_async()
async def test_general(self):
await self._show_display_menu("menubar_display.png", None)
async def test_heads_up(self):
await self._show_display_menu("menubar_display_headsup.png", 86)
async def test_show_by_type(self):
await self._show_display_menu("menubar_display_show_type.png", 106)
async def test_show_by_purpose(self):
await self._show_display_menu("menubar_display_show_purpose.png", 126)
async def test_show_custom_menu_item(self):
inst = omni.kit.viewport.menubar.display.get_instance()
custom_collection_item = CategoryCollectionItem(
"Custom catetory",
[
CategoryStateItem("Custom Item", ui.SimpleBoolModel(True)),
]
)
inst.register_custom_category_item("Show By Type", custom_collection_item)
def _build_menu():
with ui.Menu("Physics", delegate=ViewportMenuDelegate()):
SelectableMenuItem("Joints", ui.SimpleBoolModel(True))
with ui.Menu("Colliders", delegate=ViewportMenuDelegate()):
SelectableMenuItem("None", ui.SimpleBoolModel(True))
SelectableMenuItem("Selected", ui.SimpleBoolModel(False))
SelectableMenuItem("All", ui.SimpleBoolModel(False))
ui.Separator()
SelectableMenuItem("Normals", ui.SimpleBoolModel(False))
physics_item = CategoryCustomItem("Physics", _build_menu)
inst.register_custom_category_item("Show By Type", physics_item)
settings = carb.settings.get_settings()
settings.set(TEST_SETTING_FALSE, False)
settings.set(TEST_SETTING_TRUE, True)
inst.register_custom_setting("test new setting (True)", TEST_SETTING_TRUE)
inst.register_custom_setting("test new setting (False)", TEST_SETTING_FALSE)
await omni.kit.app.get_app().next_update_async()
await self._show_display_menu("menubar_display_custom.png", 106)
inst.deregister_custom_category_item("Show By Type", custom_collection_item)
inst.deregister_custom_category_item("Show By Type", physics_item)
inst.deregister_custom_setting("test new setting (True)")
inst.deregister_custom_setting("test new setting (False)")
await omni.kit.app.get_app().next_update_async()
async def test_show_custom_category_and_section(self):
inst = omni.kit.viewport.menubar.display.get_instance()
category = "Draw Overlay"
section = "Selection Display"
did_shown_changed_callback = False
def on_shown(s):
print("on_shown: {s}")
nonlocal did_shown_changed_callback
did_shown_changed_callback = True
overlay_item = CategoryCollectionItem(
category,
[
CategoryCustomItem("Points", lambda: SelectableMenuItem("Points", model=ui.SimpleBoolModel())),
CategoryCustomItem("Normals", lambda: SelectableMenuItem("Normals", model=ui.SimpleBoolModel()))
],
shown_changed_fn=on_shown
)
inst.register_custom_category_item(category, overlay_item, section)
await omni.kit.app.get_app().next_update_async()
await self._show_display_menu("menubar_display_custom_category_and_section.png", 166)
self.assertTrue(did_shown_changed_callback)
inst.deregister_custom_category_item(category, overlay_item)
await omni.kit.app.get_app().next_update_async()
async def _show_display_menu(self, golden_img_name: str, y: int = None) -> None:
# Enable mouse input
app_window = omni.appwindow.get_default_app_window()
for device in [carb.input.DeviceType.MOUSE]:
app_window.set_input_blocking_state(device, None)
try:
await ui_test.emulate_mouse_move(Vec2(20, 46), human_delay_speed=4)
await ui_test.emulate_mouse_click()
if y is not None:
await ui_test.emulate_mouse_move(Vec2(20, y))
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name=golden_img_name)
finally:
for i in range(3):
await omni.kit.app.get_app().next_update_async()
await ui_test.emulate_mouse_move(Vec2(300, 26))
await ui_test.emulate_mouse_click()
for i in range(3):
await omni.kit.app.get_app().next_update_async()
| 5,475 |
Python
| 39.865671 | 146 | 0.652603 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/docs/index.rst
|
omni.kit.viewport.menubar.display
#################################
Display Setting in Viewport MenuBar
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: omni.kit.viewport.menubar.display
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
| 336 |
reStructuredText
| 15.849999 | 49 | 0.622024 |
omniverse-code/kit/exts/omni.kit.usdz_export/PACKAGE-LICENSES/omni.kit.usdz_export-LICENSE.md
|
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
| 412 |
Markdown
| 57.999992 | 74 | 0.839806 |
omniverse-code/kit/exts/omni.kit.usdz_export/config/extension.toml
|
[package]
title = "USDZ Exporter"
description = "Packages assets into a USDZ archive."
authors = ["NVIDIA"]
version = "1.0.1"
changelog="docs/CHANGELOG.md"
preview_image = "data/preview.png"
readme = "docs/README.md"
#icon = "data/icon.png"
category = "Internal"
feature = true
[[python.module]]
name = "omni.kit.usdz_export"
[dependencies]
"omni.kit.pip_archive" = {}
"omni.ui" = {}
"omni.usd" = {}
"omni.usd.libs" = {}
"omni.kit.tool.collect" = {}
"omni.kit.window.file_exporter" = {}
# Additional python module with tests, to make them discoverable by test system.
[[python.module]]
name = "omni.kit.usdz_export.tests"
[[test]]
args = [
"--/app/asyncRendering=false",
"--/rtx/materialDb/syncLoads=true",
"--/omni.kit.plugin/syncUsdLoads=true",
"--/rtx/hydra/materialSyncLoads=true"
]
dependencies = [
"omni.kit.material.library",
]
| 861 |
TOML
| 21.102564 | 80 | 0.671312 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/__init__.py
|
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .extension_usdz import UsdzExportExtension
from .layers_menu import export, usdz_export
| 527 |
Python
| 42.999996 | 76 | 0.812144 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/extension_usdz.py
|
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .layers_menu import layers_available
from .layers_menu import LayersMenu
import omni.ext
import omni.kit.app
class UsdzExportExtension(omni.ext.IExt):
def on_startup(self, ext_id):
# Setup a callback for the event
app = omni.kit.app.get_app_interface()
ext_manager = app.get_extension_manager()
self.__extensions_subscription = ext_manager.get_change_event_stream().create_subscription_to_pop(
self._on_event, name="omni.kit.usdz_export"
)
self.__layers_menu = None
self._on_event(None)
def _on_event(self, event):
# Create/destroy the menu in the Layers window
if self.__layers_menu:
if not layers_available():
self.__layers_menu.destroy()
self.__layers_menu = None
else:
if layers_available():
self.__layers_menu = LayersMenu()
def on_shutdown(self):
self.__extensions_subscription = None
if self.__layers_menu:
self.__layers_menu.destroy()
self.__layers_menu = None
| 1,530 |
Python
| 33.795454 | 106 | 0.65817 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/layers_menu.py
|
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .utils import is_extension_loaded, copy, list_folder_async
from pxr import Sdf, Usd
from pathlib import Path
from zipfile import ZipFile
from functools import partial
from typing import Callable, List
from omni.kit.window.file_exporter import get_file_exporter
from omni.kit.widget.prompt import PromptManager
import carb
import omni.kit.tool.collect as collect
import omni.usd
import asyncio
import tempfile
import os
import shutil
import omni.kit.app
import omni.kit.notification_manager as nm
def layers_available() -> bool:
"""Returns True if the extension "omni.kit.widget.layers" is loaded"""
return is_extension_loaded("omni.kit.widget.layers")
async def usdz_export(identifier, export_path):
try:
target_out = export_path
carb.log_info(f"Starting to export layer '{identifier}' to '{target_out}'")
prompt = PromptManager.post_simple_prompt("Please Wait", "Exporting to USDZ...", ok_button_info=None, modal=True)
# Waits for prompt to be shown
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
layer = Sdf.Layer.FindOrOpen(identifier)
if not layer:
message = f"Failed to export layer {identifier} as it does not exist."
carb.log_error(message)
nm.post_notification(message, status=nm.NotificationStatus.WARNING)
return
with tempfile.TemporaryDirectory() as tmp_path:
tmp_path = Path(tmp_path)
collect_path = tmp_path.joinpath("collected")
split_ext = os.path.splitext(identifier)
# Can't collect USDZ files because MDLs can't be resolved
if (split_ext[1] == '.usdz'):
input_usdz_temp_path = str(tmp_path.joinpath('temp_copy.usdz'))
await copy(identifier, str(input_usdz_temp_path))
with ZipFile(input_usdz_temp_path, 'r') as zip_ref:
zip_ref.extractall(str(tmp_path))
tmp_file_path = str(tmp_path.joinpath("main.usdc"))
layer.Export(tmp_file_path)
entry_layer_to_collect = tmp_file_path
elif not omni.usd.is_usd_writable_filetype(identifier) or identifier.startswith('anon'):
tmp_file_path = str(tmp_path.joinpath("main.usdc"))
layer.Export(tmp_file_path)
entry_layer_to_collect = tmp_file_path
else:
entry_layer_to_collect = identifier
collector = collect.Collector(entry_layer_to_collect, str(collect_path), flat_collection=True)
await collector.collect(None, None)
# must create USDZ locally because the UsdUtils package cannot handle omniverse:// URIs
absolute_paths, relative_paths = await list_folder_async(str(collect_path))
local_out_path = collect_path.joinpath("local_out.usdz")
# Create usdz package manually without using USD API as it cannot handle UDIM textures.
zip_writer = Usd.ZipFileWriter.CreateNew(str(local_out_path))
with zip_writer:
for absolute_path, relative_path in zip(absolute_paths, relative_paths):
url = omni.client.break_url(absolute_path)
absolute_path = url.path
# FIXME: omni.client will return windows path prefixed with '/'
if os.name == "nt" and absolute_path[0] == '/':
absolute_path = absolute_path[1:]
zip_writer.AddFile(absolute_path, relative_path)
await copy(str(local_out_path), target_out)
layer = None
zip_writer = None
    finally:
        if prompt:
            prompt.visible = False
        prompt = None
carb.log_info(f"Finished exporting layer '{identifier}' to '{target_out}'")
def export(objects):
"""Export the target layer to USDZ"""
def on_export(callback: Callable, flatten: bool, filename: str, dirname: str, extension: str = '', selections: List[str] = []):
nonlocal objects
path = f"{dirname}/{filename}{extension}"
item = objects["item"]
identifier = item().identifier
asyncio.ensure_future(usdz_export(identifier, path))
file_picker = get_file_exporter()
file_picker.show_window(
title="Export To USDZ",
export_button_label="Export",
export_handler=partial(on_export, None, False),
file_extension_types=[(".usdz", "Zipped package")]
)
class LayersMenu:
"""
When this object is alive, Layers 2.0 has an additional action
for exporting the layer to USDZ.
"""
def __init__(self):
import omni.kit.widget.layers as layers
self.__menu_subscription = layers.ContextMenu.add_menu(
[
{"name": ""},
{
"name": "Export USDZ",
"glyph": "menu_rename.svg",
"show_fn": [
layers.ContextMenu.is_layer_item,
layers.ContextMenu.is_not_missing_layer,
layers.ContextMenu.is_layer_not_locked_by_other,
layers.ContextMenu.is_layer_and_parent_unmuted
],
"onclick_fn": export,
}
]
)
def destroy(self):
"""Remove the menu from Layers 2.0"""
self.__menu_subscription = None
| 5,903 |
Python
| 38.891892 | 131 | 0.614942 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/utils.py
|
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import omni.kit.app
import traceback
import carb
import omni.client
import omni.client.utils as clientutils
def is_extension_loaded(extension_name: str) -> bool:
    """
    Returns True if the extension with the given name is loaded.
    """
    def is_ext(ext_id: str, name: str) -> bool:
        id_name = ext_id.split("-")[0]
        return id_name == name
    app = omni.kit.app.get_app_interface()
    ext_manager = app.get_extension_manager()
    extensions = ext_manager.get_extensions()
    loaded = next((ext for ext in extensions if is_ext(ext["id"], extension_name) and ext["enabled"]), None)
    return loaded is not None
async def copy(src_path: str, dest_path: str):
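    """Copy src_path to dest_path with omni.client, overwriting any existing file.
    Returns True on success, False otherwise; failures are logged, not raised.
    """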
carb.log_info(f"Copying from {src_path} to {dest_path}...")
try:
result = await omni.client.copy_async(src_path, dest_path, omni.client.CopyBehavior.OVERWRITE)
if result != omni.client.Result.OK:
carb.log_error(f"Cannot copy from {src_path} to {dest_path}, error code: {result}.")
return False
else:
return True
except Exception as e:
traceback.print_exc()
carb.log_error(str(e))
return False
async def list_folder_async(folder_path):
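    """Recursively list a folder (or accept a single file path) via omni.client.
    Returns a pair (absolute_paths, relative_paths), where relative_paths holds
    the same entries expressed relative to folder_path with '/' separators.
    """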
def compute_absolute_path(base_path, is_base_path_folder, path, is_path_folder):
if is_base_path_folder and not base_path.endswith("/"):
base_path += "/"
if is_path_folder and not path.endswith("/"):
path += "/"
return clientutils.make_absolute_url_if_possible(base_path, path)
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix) :]
return text
absolute_paths = []
relative_paths = []
result, entry = await omni.client.stat_async(folder_path)
if result == omni.client.Result.OK and entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
is_folder = True
else:
is_folder = False
folder_path = clientutils.make_file_url_if_possible(folder_path)
if not is_folder:
absolute_paths = [folder_path]
relative_paths = [os.path.basename(folder_path)]
else:
if not folder_path.endswith("/"):
folder_path += "/"
folder_queue = [folder_path]
while len(folder_queue) > 0:
folder = folder_queue.pop(0)
(result, entries) = await omni.client.list_async(folder)
if result != omni.client.Result.OK:
break
folders = set((e.relative_path for e in entries if e.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN))
for f in folders:
folder_queue.append(compute_absolute_path(folder, True, f, False))
files = set((e.relative_path for e in entries if not e.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN))
for file in files:
absolute_path = compute_absolute_path(folder, True, file, False)
absolute_paths.append(absolute_path)
relative_path = remove_prefix(absolute_path, folder_path[:-1])
relative_path = relative_path.replace("\\", "/")
if relative_path != "/" and relative_path.startswith("/"):
relative_path = relative_path[1:]
if len(relative_path) > 0:
relative_paths.append(relative_path)
return absolute_paths, relative_paths
| 3,859 |
Python
| 36.115384 | 116 | 0.63177 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/tests/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .usdz_export_test import TestUsdzExport
| 473 |
Python
| 46.399995 | 76 | 0.811839 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/tests/usda_test.py
|
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from ..layer_watch import LayerWatch
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
from pxr import Usd
from pxr import UsdGeom
import omni.client
import omni.kit
import omni.usd
import os
import time
import unittest
OMNI_SERVER = "omniverse://kit.nucleus.ov-ci.nvidia.com"
OMNI_USER = "omniverse"
OMNI_PASS = "omniverse"
class TestUsdaEdit(OmniUiTest):
def __set_omni_credentials(self):
# Save the environment to be able to restore it
self.__OMNI_USER = os.environ.get("OMNI_USER", None)
self.__OMNI_PASS = os.environ.get("OMNI_PASS", None)
# Set the credentials
os.environ["OMNI_USER"] = OMNI_USER
os.environ["OMNI_PASS"] = OMNI_PASS
def __restore_omni_credentials(self):
if self.__OMNI_USER is not None:
os.environ["OMNI_USER"] = self.__OMNI_USER
else:
os.environ.pop("OMNI_USER")
if self.__OMNI_PASS is not None:
os.environ["OMNI_PASS"] = self.__OMNI_PASS
else:
os.environ.pop("OMNI_PASS")
async def test_open_file(self):
# New stage with a sphere
await omni.usd.get_context().new_stage_async()
omni.kit.commands.execute("CreatePrim", prim_path="/Sphere", prim_type="Sphere", select_new_prim=False)
stage = omni.usd.get_context().get_stage()
# Create USDA
usda_filename = LayerWatch().start_watch(stage.GetRootLayer().identifier)
# Check it's a valid stage
duplicate = Usd.Stage.Open(usda_filename)
self.assertTrue(duplicate)
# Check it has the sphere
sphere = duplicate.GetPrimAtPath("/Sphere")
self.assertTrue(sphere)
UsdGeom.Cylinder.Define(duplicate, '/Cylinder')
duplicate.Save()
await omni.kit.app.get_app().next_update_async()
# Check cylinder is created
cylinder = duplicate.GetPrimAtPath("/Cylinder")
self.assertTrue(cylinder)
# Remove USDA
LayerWatch().stop_watch(stage.GetRootLayer().identifier)
# Check the file is removed
self.assertFalse(Path(usda_filename).exists())
await omni.kit.app.get_app().next_update_async()
@unittest.skip("Works locally, but fails on TC for server connection in linux -> flaky")
async def test_edit_file_on_nucleus(self):
self.__set_omni_credentials()
# Create a new stage on server
temp_usd_folder = f"{OMNI_SERVER}/Users/test_usda_edit_{str(time.time())}"
temp_usd_file_path = f"{temp_usd_folder}/test_edit_file_on_nucleus.usd"
# cleanup first
await omni.client.delete_async(temp_usd_folder)
# create the folder
result = await omni.client.create_folder_async(temp_usd_folder)
self.assertEqual(result, omni.client.Result.OK)
stage = Usd.Stage.CreateNew(temp_usd_file_path)
await omni.kit.app.get_app().next_update_async()
UsdGeom.Xform.Define(stage, '/xform')
UsdGeom.Sphere.Define(stage, '/xform/sphere')
await omni.kit.app.get_app().next_update_async()
stage.Save()
# Start watching and edit the temp stage
usda_filename = LayerWatch().start_watch(stage.GetRootLayer().identifier)
temp_stage = Usd.Stage.Open(usda_filename)
# Create another sphere
UsdGeom.Sphere.Define(temp_stage, '/xform/sphere1')
# Save the stage
temp_stage.Save()
        # UsdStage saves the temporary file and renames it to usda, so we need
        # to touch it to let LayerWatch know it's changed.
Path(usda_filename).touch()
        # The watchdog in LayerWatch doesn't invoke its callback right away,
        # so wait for the import to finish before checking the result.
await LayerWatch().wait_import_async(stage.GetRootLayer().identifier)
# Remove USDA
LayerWatch().stop_watch(stage.GetRootLayer().identifier)
stage.Reload()
# Check the second sphere is there
sphere = stage.GetPrimAtPath("/xform/sphere1")
self.assertTrue(sphere)
# Remove the temp folder
result = await omni.client.delete_async(temp_usd_folder)
self.assertEqual(result, omni.client.Result.OK)
self.__restore_omni_credentials()
| 4,700 |
Python
| 34.885496 | 111 | 0.657021 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/tests/usdz_export_test.py
|
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
from pxr import Usd
from pxr import UsdGeom
import carb
import omni.client
import omni.kit
import omni.usd
import os
import time
import unittest
from omni.kit.usdz_export import usdz_export
OMNI_SERVER = "omniverse://ov-test"
class TestUsdzExport(OmniUiTest):
def get_test_dir(self):
token = carb.tokens.get_tokens_interface()
data_dir = token.resolve("${data}")
return f"{data_dir}"
async def test_export_usdz_file(self):
usdz_size = 2600000
usdz_size_tc = 2675966
current_path = Path(__file__)
test_data_path = current_path.parent.parent.parent.parent.parent.joinpath("data")
test_stage_path = str(test_data_path.joinpath("test_stage").joinpath("scene.usd"))
test_dir = self.get_test_dir()
export_file_path = Path(test_dir).joinpath("out.usdz").resolve()
        await usdz_export(test_stage_path, str(export_file_path))
        self.assertTrue(os.path.isfile(str(export_file_path)), 'out.usdz does not exist')
        size = os.stat(export_file_path).st_size
        self.assertTrue(usdz_size <= size <= usdz_size_tc, f'File size mismatch, expected between {usdz_size} and {usdz_size_tc} bytes but got {size}')
| 1,702 |
Python
| 36.844444 | 127 | 0.706228 |
omniverse-code/kit/exts/omni.kit.usdz_export/docs/CHANGELOG.md
|
# Changelog
## [1.0.1] - 2022-11-08
- Add "omni.kit.window.file_exporter" as dependency.
## [1.0.0] - 2022-08-18
- Initial extension.
| 137 |
Markdown
| 14.333332 | 52 | 0.635036 |
omniverse-code/kit/exts/omni.kit.usdz_export/docs/README.md
|
# USDZ Exporter [omni.kit.usdz_export]
Exports the selected layer to a USDZ archive.
| 83 |
Markdown
| 15.799997 | 41 | 0.759036 |
omniverse-code/kit/fabric/include/carb/flatcache/IToken.h
|
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Interface.h>
#ifndef __CUDACC__
// InterfaceUtils.h provides carb::getCachedInterface and is not CUDA-compatible
#include <carb/InterfaceUtils.h>
#endif // __CUDACC__
// Set to empty macro when IToken::iToken static member is removed
#define FLATCACHE_ITOKEN_INIT \
const carb::flatcache::IToken* carb::flatcache::iToken = nullptr;
namespace carb
{
namespace flatcache
{
// TokenC are integer keys that identify paths to C-ABI interfaces
struct TokenC
{
uint64_t token;
// Note that in the name comparisons below we mask off USD's lifetime bit.
// For example, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
constexpr bool operator<(const TokenC& other) const
{
return (token & ~1) < (other.token & ~1);
}
constexpr bool operator==(const TokenC& other) const
{
return (token & ~1) == (other.token & ~1);
}
constexpr bool operator!=(const TokenC& other) const
{
return (token & ~1) != (other.token & ~1);
}
};
static_assert(std::is_standard_layout<TokenC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");
// We don't reference count the uninitialized (or empty) token, and we use
// this fact to avoid unnecessary dll calls to addRef()/removeRef(), for
// example during std::vector resize. To do this we need to check whether a
// token is uninitialized without the dll call getEmptyToken(), so we store
// its value here in a constant.
// We run automated test "IToken::getEmptyToken() dll call can be replaced with
// constant, kUninitializedToken" to ensure that this constant never
// changes.
static constexpr TokenC kUninitializedToken{0};
// C-ABI interface to pxr::TfToken
struct IToken
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IToken", 0, 1);
TokenC (*getHandle)(const char* name);
const char* (*getText)(TokenC handle);
void (*addRef)(TokenC handle);
void (*removeRef)(TokenC handle);
TokenC (*getEmptyToken)();
uint64_t (*size)(TokenC handle);
};
// C++ wrapper for IToken
class Token
{
static carb::flatcache::IToken& sIToken();
public:
// DEPRECATED: keeping for binary compatibility
// Will be removed in October 2021 - @TODO set FLATCACHE_ITOKEN_INIT to empty macro when removed!
// Still safe to use if initialized in a given dll
static const carb::flatcache::IToken* iToken;
Token() : mHandle(kUninitializedToken)
{
}
Token(const char* string)
{
mHandle = sIToken().getHandle(string);
}
// Needs to be noexcept for std::vector::resize() to move instead of copy
~Token() noexcept
{
#ifndef __CUDACC__
if (mHandle != kUninitializedToken)
{
if (!carb::isFrameworkValid())
{
return;
}
            // IToken can be nullptr during process exit
if (auto iToken = carb::getCachedInterface<carb::flatcache::IToken>())
{
iToken->removeRef(mHandle);
}
}
#endif // __CUDACC__
}
// Copy constructor
Token(const Token& other) : mHandle(other.mHandle)
{
if (mHandle != kUninitializedToken)
{
sIToken().addRef(mHandle);
}
}
// Copy construct from integer
Token(TokenC token) : mHandle(token)
{
if (mHandle != kUninitializedToken)
{
sIToken().addRef(mHandle);
}
}
// Move constructor
// Needs to be noexcept for std::vector::resize() to move instead of copy
Token(Token&& other) noexcept
{
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
// Make source invalid
other.mHandle = kUninitializedToken;
}
// Copy assignment
Token& operator=(const Token& other)
{
if (this != &other)
{
if (mHandle != kUninitializedToken)
{
sIToken().removeRef(mHandle);
}
mHandle = other.mHandle;
if (other.mHandle != kUninitializedToken)
{
sIToken().addRef(mHandle);
}
}
return *this;
}
// Move assignment
Token& operator=(Token&& other) noexcept
{
if (&other == this)
return *this;
// We are about to overwrite the dest handle, so decrease its refcount
if (mHandle != kUninitializedToken)
{
sIToken().removeRef(mHandle);
}
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
other.mHandle = kUninitializedToken;
return *this;
}
const char* getText() const
{
return sIToken().getText(mHandle);
}
uint64_t size() const
{
return sIToken().size(mHandle);
}
std::string getString() const
{
return std::string(sIToken().getText(mHandle), sIToken().size(mHandle));
}
// Note that in the name comparisons below TokenC masks off USD's lifetime bit.
// In other words, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
constexpr bool operator<(const Token& other) const
{
return mHandle < other.mHandle;
}
constexpr bool operator!=(const Token& other) const
{
return mHandle != other.mHandle;
}
constexpr bool operator==(const Token& other) const
{
return mHandle == other.mHandle;
}
constexpr operator TokenC() const
{
return mHandle;
}
private:
TokenC mHandle;
};
static_assert(std::is_standard_layout<Token>::value, "Token must be standard layout as it is used in C-ABI interfaces");
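// A minimal usage sketch (the token text below is illustrative only):
//
//   carb::flatcache::Token points("points");   // interns the string via IToken::getHandle
//   const char* text = points.getText();       // "points"
//   carb::flatcache::TokenC handle = points;   // implicit conversion to the C handle
//
// Copies add a reference to the shared handle; moves transfer it without
// touching the refcount, which is why the move operations are noexcept.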
#ifndef __CUDACC__
inline carb::flatcache::IToken& Token::sIToken()
{
// Acquire carbonite interface on first use
carb::flatcache::IToken* iToken = carb::getCachedInterface<carb::flatcache::IToken>();
CARB_ASSERT(iToken);
return *iToken;
}
#endif // __CUDACC__
inline uint64_t swapByteOrder(uint64_t val)
{
#if !CARB_COMPILER_MSC
// Compilers other than MSVC tend to turn the following into a single instruction like bswap
val =
((val & 0xFF00000000000000u) >> 56u) |
((val & 0x00FF000000000000u) >> 40u) |
((val & 0x0000FF0000000000u) >> 24u) |
((val & 0x000000FF00000000u) >> 8u) |
((val & 0x00000000FF000000u) << 8u) |
((val & 0x0000000000FF0000u) << 24u) |
((val & 0x000000000000FF00u) << 40u) |
((val & 0x00000000000000FFu) << 56u);
#else
// MSVC does not currently optimize the above code, so we have to use an intrinsic to get bswap
val = _byteswap_uint64(val);
#endif
return val;
}
inline size_t hash(TokenC token)
{
size_t tokenWithoutMortalityBit = token.token & ~1;
// The following Hash function was chosen to match the one in pxr\base\tf\hash.h
// This is based on Knuth's multiplicative hash for integers. The
// constant is the closest prime to the binary expansion of the inverse
// golden ratio. The best way to produce a hash table bucket index from
// the result is to shift the result right, since the higher order bits
// have the most entropy. But since we can't know the number of buckets
// in a table that's using this, we just reverse the byte order instead,
// to get the highest entropy bits into the low-order bytes.
return swapByteOrder(tokenWithoutMortalityBit * 11400714819323198549ULL);
}
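// Note that because bit 0 (the mortality bit) is masked off before hashing,
// mortal and immortal tokens created from the same string hash identically,
// matching the equality semantics of TokenC above.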
inline size_t hash(Token const& token)
{
return hash(TokenC(token));
}
}
}
namespace std
{
template <>
struct hash<carb::flatcache::Token>
{
std::size_t operator()(const carb::flatcache::Token& key) const
{
return carb::flatcache::hash(key);
}
};
template <>
class hash<carb::flatcache::TokenC>
{
public:
size_t operator()(const carb::flatcache::TokenC& key) const
{
return carb::flatcache::hash(key);
}
};
}
| 8,572 |
C
| 27.768456 | 122 | 0.640457 |
omniverse-code/kit/fabric/include/carb/flatcache/Defines.h
|
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// Improved #define preprocessor directives that support compile-time checking for misspelled or missing
// directives. Basically, the same as #define MY_FEATURE 0/1, but with a bit more compile-time safety,
// and ease of use around mixing or combining boolean logic.
//
// Example usage:
// #define MY_FEATURE_A IN_USE
// #define MY_FEATURE_B NOT_IN_USE
// #define MY_FEATURE_C USE_IF( USING( MY_FEATURE_A ) && USING( MY_FEATURE_B ) )
// ...
// void doStuff()
// {
// #if USING( MY_FEATURE_C )
// doStuff_C();
// #else // #if USING( MY_FEATURE_C )
// doStuff_NotC();
// #endif // #if USING( MY_FEATURE_C )
// }
#define IN_USE &&
#define NOT_IN_USE &&!
#define USE_IF(X) &&((X)?1:0)&&
#define USING(X) (1 X 1)
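// How this works: USING(X) expands to (1 X 1). With X defined as IN_USE that
// becomes (1 && 1) == 1, with NOT_IN_USE it becomes (1 &&! 1) == 0, and with a
// misspelled or undefined name it becomes (1 MY_FEATUR 1), which is a compile
// error instead of silently evaluating to 0 the way a bare #if would.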
#ifndef NDEBUG
#define DEVELOPMENT_BUILD IN_USE
#else // #ifndef NDEBUG
#define DEVELOPMENT_BUILD NOT_IN_USE
#endif // #ifndef NDEBUG
#ifdef _WIN32
#define WINDOWS_BUILD IN_USE
#define LINUX_BUILD NOT_IN_USE
#elif defined(__linux__) // #ifdef _WIN32
#define WINDOWS_BUILD NOT_IN_USE
#define LINUX_BUILD IN_USE
#else // #elif defined(__linux__) // #ifdef _WIN32
#error "Unsupported platform"
#endif
#define ASSERTS USE_IF( USING( DEVELOPMENT_BUILD ) )
| 1,630 |
C
| 29.203703 | 103 | 0.707975 |
omniverse-code/kit/fabric/include/carb/flatcache/WrapperImpl.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// The purpose of this file is to implement the C++ classes StageInProgress,
// StageAtTime, StageAtTimeInterval and StageHistoryWindow by calling the
// carbonite C-ABI interfaces, IStageInProgress, IStageAtTime,
// IStageAtTimeWindow and IStageHistoryWindow.
//
//
#include "StageWithHistory.h"
#include <carb/InterfaceUtils.h>
#include <carb/logging/Log.h>
#include <type_traits>
#include <cstdint>
namespace carb
{
namespace flatcache
{
// StageInProgress implementation starts here
// RAII constructor
inline StageInProgress::StageInProgress(StageWithHistory& stageWithHistory, size_t simFrameNumber)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
m_stageInProgress = iStageInProgress->create(stageWithHistory.m_usdStageId, simFrameNumber);
m_usdStageId = stageWithHistory.m_usdStageId;
m_createdFromId = false;
}
// Non-RAII constructor
inline StageInProgress::StageInProgress(StageInProgressId stageInProgressId)
{
m_stageInProgress = stageInProgressId;
m_createdFromId = true;
// m_usdStageId is not valid when m_createdFromId==true
}
inline StageInProgress::~StageInProgress()
{
if (!m_createdFromId)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroy(m_usdStageId);
}
}
inline size_t StageInProgress::getFrameNumber()
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getFrameNumber(m_stageInProgress);
}
inline ValidMirrors StageInProgress::getAttributeValidBits(const Path& path, const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getAttributeValidBits(m_stageInProgress, path, attrName);
}
inline RationalTime StageInProgress::getFrameTime()
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getFrameTime(m_stageInProgress);
}
template <typename T>
T* StageInProgress::getAttribute(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttribute(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
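// Example of typed access through the size-checked accessors above (the prim
// path and attribute name are illustrative only):
//
//   StageInProgress stage(stageInProgressId);
//   float* radius = stage.getAttribute<float>(Path("/World/Sphere"), Token("radius"));
//   if (radius)   // nullptr on an element-size mismatch, so always check first
//       *radius *= 2.0f;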
template <typename T>
const T* StageInProgress::getAttributeRd(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstSpanC ptrAndSize = iStageInProgress->getAttributeRd(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<const T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T* StageInProgress::getAttributeWr(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttributeWr(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T* StageInProgress::getAttributeGpu(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttributeGpu(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
const T* StageInProgress::getAttributeRdGpu(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstSpanC ptrAndSize = iStageInProgress->getAttributeRdGpu(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<const T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T* StageInProgress::getAttributeWrGpu(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttributeWrGpu(m_stageInProgress, path, attrName);
    if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T& StageInProgress::getOrCreateAttributeWr(const Path& path, const Token& attrName, Type type)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getOrCreateAttributeWr(m_stageInProgress, path, attrName, TypeC(type));
if (sizeof(T) != ptrAndSize.elementSize)
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
}
return *reinterpret_cast<T*>(ptrAndSize.ptr);
}
template <typename T>
gsl::span<T> StageInProgress::getArrayAttribute(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC arrayData = iStageInProgress->getArrayAttributeWr(m_stageInProgress, path, attrName);
if (sizeof(T) != arrayData.elementSize)
{
CARB_LOG_WARN_ONCE(
"Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes",
sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize);
return gsl::span<T>();
}
gsl::span<T> retval(reinterpret_cast<T*>(arrayData.ptr), arrayData.elementCount);
return retval;
}
template <typename T>
gsl::span<const T> StageInProgress::getArrayAttributeRd(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstSpanC arrayData = iStageInProgress->getArrayAttributeRd(m_stageInProgress, path, attrName);
if (sizeof(T) != arrayData.elementSize)
{
CARB_LOG_WARN_ONCE(
"Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes",
sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize);
return gsl::span<const T>();
}
gsl::span<const T> retval(reinterpret_cast<const T*>(arrayData.ptr), arrayData.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getArrayAttributeWr(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC arrayData = iStageInProgress->getArrayAttributeWr(m_stageInProgress, path, attrName);
if (sizeof(T) != arrayData.elementSize)
{
CARB_LOG_WARN_ONCE(
"Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes",
sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize);
return gsl::span<T>();
}
gsl::span<T> retval(reinterpret_cast<T*>(arrayData.ptr), arrayData.elementCount);
return retval;
}
inline size_t StageInProgress::getArrayAttributeSize(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getArrayAttributeSize(m_stageInProgress, path, attrName);
}
inline void StageInProgress::setArrayAttributeSize(const Path& path, const Token& attrName, size_t elemCount)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->setArrayAttributeSize(m_stageInProgress, path, attrName, elemCount);
}
template <typename T>
inline gsl::span<T> StageInProgress::setArrayAttributeSizeAndGet(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
size_t indexInBucket,
const Token& attrName,
size_t newElemCount)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC newArrayC = iStageInProgress->setArrayAttributeSizeAndGet(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, indexInBucket, attrName, newElemCount);
T* typedElementsPtr = reinterpret_cast<T*>(newArrayC.ptr);
return { typedElementsPtr, newArrayC.elementCount };
}
inline void StageInProgress::createPrim(const Path& path)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->createPrim(m_stageInProgress, path);
}
inline void StageInProgress::destroyPrim(const Path& path)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyPrim(m_stageInProgress, path);
}
inline void StageInProgress::createAttribute(const Path& path, const Token& attrName, Type type)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->createAttribute(m_stageInProgress, path, attrName, TypeC(type));
}
template <int n>
inline void StageInProgress::createAttributes(const Path& path, std::array<AttrNameAndType, n> attributes)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
std::array<TokenC, n> names;
std::array<TypeC, n> types;
for (int c = 0; c < n; ++c)
{
names[c] = attributes[c].name;
types[c] = TypeC(attributes[c].type);
}
iStageInProgress->createAttributes(m_stageInProgress, path, names.data(), types.data(), n);
}
inline void StageInProgress::destroyAttribute(const Path& path, const Token& attrName, Type)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyAttribute2(m_stageInProgress, path, attrName);
}
inline void StageInProgress::destroyAttribute(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyAttribute2(m_stageInProgress, path, attrName);
}
template <int n>
inline void StageInProgress::destroyAttributes(const Path& path, const std::array<Token, n>& attributes)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
std::array<TokenC, n> names;
for (int c = 0; c < n; ++c)
{
names[c] = TokenC(attributes[c]);
}
iStageInProgress->destroyAttributes(m_stageInProgress, path, names.data(), n);
}
inline void StageInProgress::destroyAttributes(const Path& path, const std::vector<Token>& attributes)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
const size_t n = attributes.size();
std::vector<TokenC> names(n);
for (size_t c = 0; c < n; ++c)
{
names[c] = TokenC(attributes[c]);
}
iStageInProgress->destroyAttributes(m_stageInProgress, path, names.data(), (uint32_t)n);
}
inline PrimBucketList StageInProgress::findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
PrimBucketListId primBucketListId = iStageInProgress->findPrims(m_stageInProgress, all, any, none);
return { primBucketListId };
}
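// findPrims selects the buckets of prims that have every attribute in 'all',
// at least one attribute in 'any', and no attribute in 'none'. A typical
// pattern (schematic) is to iterate the result by bucket index and fetch
// columns with getAttributeArray<T>() / getPathArray().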
inline void StageInProgress::attributeEnableChangeTracking(const Token& attrName, ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->attributeEnable(m_stageInProgress, attrName, listenerId);
}
inline void StageInProgress::enablePrimCreateTracking(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->enablePrimCreateTracking(m_stageInProgress, listenerId);
}
inline void StageInProgress::attributeDisableChangeTracking(const Token& attrName, ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->attributeDisable(m_stageInProgress, attrName, listenerId);
}
inline void StageInProgress::pauseChangeTracking(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->pause(m_stageInProgress, listenerId);
}
inline void StageInProgress::resumeChangeTracking(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->resume(m_stageInProgress, listenerId);
}
inline bool StageInProgress::isChangeTrackingPaused(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
return iChangeTrackerConfig->isChangeTrackingPaused(m_stageInProgress, listenerId);
}
inline bool StageInProgress::isListenerAttached(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
return iChangeTrackerConfig->isListenerAttached(m_stageInProgress, listenerId);
}
inline void StageInProgress::detachListener(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->detachListener(m_stageInProgress, listenerId);
}
inline size_t StageInProgress::getListenerCount()
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
return iChangeTrackerConfig->getListenerCount(m_stageInProgress);
}
inline ChangedPrimBucketList StageInProgress::getChanges(ListenerId listenerId)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
PrimBucketListId changeListId = iStageInProgress->getChanges(m_stageInProgress, listenerId);
return ChangedPrimBucketList(changeListId);
}
inline void StageInProgress::popChanges(ListenerId listenerId)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->popChanges(m_stageInProgress, listenerId);
}
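// Typical change-tracking flow (schematic): enable tracking for the attributes
// of interest with attributeEnableChangeTracking(attr, listener), then each
// frame call getChanges(listener) to inspect the modified buckets and
// popChanges(listener) to clear the queue for that listener.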
template <typename T>
gsl::span<T> StageInProgress::getAttributeArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArray(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<const T> StageInProgress::getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
ConstSpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayRd(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
gsl::span<const T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayWr(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayGpu(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<const T> StageInProgress::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
ConstSpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayRdGpu(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
gsl::span<const T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayWrGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayWrGpu(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getOrCreateAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName,
Type type)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getOrCreateAttributeArrayWr(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName, TypeC(type));
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
std::vector<gsl::span<T>> StageInProgress::getArrayAttributeArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizes(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
size_t primCount = pointersAndSizes.elementCount;
std::vector<gsl::span<T>> arrays(primCount);
for (size_t i = 0; i != primCount; i++)
{
T* typedElementsPtr = reinterpret_cast<T*>(pointersAndSizes.arrayPtrs[i]);
arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
}
return arrays;
}
template <typename T>
std::vector<gsl::span<const T>> StageInProgress::getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizesRd(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
size_t primCount = pointersAndSizes.elementCount;
std::vector<gsl::span<const T>> arrays(primCount);
for (size_t i = 0; i != primCount; i++)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(pointersAndSizes.arrayPtrs[i]);
arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
}
return arrays;
}
template <typename T>
std::vector<gsl::span<T>> StageInProgress::getArrayAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizesWr(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
size_t primCount = pointersAndSizes.elementCount;
std::vector<gsl::span<T>> arrays(primCount);
for (size_t i = 0; i != primCount; i++)
{
T* typedElementsPtr = reinterpret_cast<T*>(pointersAndSizes.arrayPtrs[i]);
arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
}
return arrays;
}
inline gsl::span<const Path> StageInProgress::getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
ConstPathCSpan arrayC;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getPathArray(&arrayC, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
const Path* array = reinterpret_cast<const Path*>(arrayC.ptr);
gsl::span<const Path> retval(array, arrayC.elementCount);
return retval;
}
inline void StageInProgress::printBucketNames() const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->printBucketNames(m_stageInProgress);
}
inline void StageInProgress::logAttributeWriteForNotice(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->logAttributeWriteForNotice(m_stageInProgress, path, attrName);
}
inline flatcache::set<AttrNameAndType> StageInProgress::getAttributeNamesAndTypes(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
size_t attrCount = iStageInProgress->getBucketAttributeCount(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
flatcache::set<AttrNameAndType> namesAndTypes;
namesAndTypes.v.resize(attrCount);
// getBucketAttributeNamesAndTypes is guaranteed to return an ordered vector, so we don't have to sort namesAndTypes
iStageInProgress->getBucketAttributeNamesAndTypes(
namesAndTypes.data(), attrCount, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
return namesAndTypes;
}
// Connection API
inline void StageInProgress::createConnection(const Path& path, const Token& connectionName, const Connection& connection)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->createConnection(m_stageInProgress, path, connectionName, connection);
}
inline void StageInProgress::createConnections(const Path& path, const gsl::span<Token>& connectionNames, const gsl::span<Connection>& connections )
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    if (connectionNames.size() != connections.size())
        return;
const TokenC* namesC = reinterpret_cast<const TokenC*>(connectionNames.data());
iStageInProgress->createConnections(m_stageInProgress, path, namesC, connections.data(), connectionNames.size());
}
inline void StageInProgress::destroyConnection(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyConnection(m_stageInProgress, path, connectionName);
}
inline void StageInProgress::destroyConnections(const Path& path, const gsl::span<Token>& connectionNames)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
const TokenC* namesC = reinterpret_cast<const TokenC*>(connectionNames.data());
iStageInProgress->destroyConnections(m_stageInProgress, path, namesC, connectionNames.size());
}
inline Connection* StageInProgress::getConnection(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getConnection(m_stageInProgress, path, connectionName);
}
inline const Connection* StageInProgress::getConnectionRd(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getConnectionRd(m_stageInProgress, path, connectionName);
}
inline Connection* StageInProgress::getConnectionWr(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getConnectionWr(m_stageInProgress, path, connectionName);
}
inline void StageInProgress::copyAttributes(const Path& srcPath, const Path& dstPath)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->copyAllAttributes(m_stageInProgress, srcPath, dstPath);
}
inline void StageInProgress::copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
size_t n = srcAttrs.size();
const TokenC* srcAttrsC = reinterpret_cast<const TokenC*>(srcAttrs.data());
iStageInProgress->copySpecifiedAttributes(m_stageInProgress, srcPath, srcAttrsC, dstPath, srcAttrsC, n);
}
inline void StageInProgress::copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath, const gsl::span<Token>& dstAttrs)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    if (srcAttrs.size() != dstAttrs.size())
    {
        return;
    }
size_t n = srcAttrs.size();
const TokenC* srcAttrsC = reinterpret_cast<const TokenC*>(srcAttrs.data());
const TokenC* dstAttrsC = reinterpret_cast<const TokenC*>(dstAttrs.data());
iStageInProgress->copySpecifiedAttributes(m_stageInProgress, srcPath, srcAttrsC, dstPath, dstAttrsC, n);
}
inline bool StageInProgress::primExists(const Path& path)
{
auto iStageReaderWriter = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
bool retval = iStageReaderWriter->getAttributeCount(m_stageInProgress, path) != 0;
return retval;
}
// PrimBucketList implementation starts here
inline carb::flatcache::IPrimBucketList* PrimBucketList::sIPrimBucketList()
{
// Acquire carbonite interface on first use
return carb::getCachedInterface<carb::flatcache::IPrimBucketList>();
}
inline size_t PrimBucketList::bucketCount() const
{
return sIPrimBucketList()->getBucketCount(m_primBucketListId);
}
inline size_t PrimBucketList::size() const
{
return sIPrimBucketList()->getBucketCount(m_primBucketListId);
}
inline void PrimBucketList::print() const
{
return sIPrimBucketList()->print(m_primBucketListId);
}
inline PrimBucketList::~PrimBucketList()
{
sIPrimBucketList()->destroy(m_primBucketListId);
}
inline BucketChanges ChangedPrimBucketList::getChanges(size_t index)
{
return BucketChanges(sIPrimBucketList()->getChanges(m_primBucketListId, index));
}
inline AddedPrimIndices ChangedPrimBucketList::getAddedPrims(size_t index)
{
return AddedPrimIndices(sIPrimBucketList()->getAddedPrims(m_primBucketListId, index));
}
// StageAtTimeInterval implementation starts here
inline carb::flatcache::IStageAtTimeInterval* StageAtTimeInterval::sIStageAtTimeInterval()
{
return carb::getCachedInterface<carb::flatcache::IStageAtTimeInterval>();
}
inline StageAtTimeInterval::StageAtTimeInterval(StageWithHistory& stageWithHistory,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime)
{
m_stageAtTimeInterval =
sIStageAtTimeInterval()->create(stageWithHistory.m_stageWithHistory, beginTime, endTime, includeEndTime);
}
inline StageAtTimeInterval::StageAtTimeInterval(StageWithHistoryId stageWithHistoryId,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime)
{
m_stageAtTimeInterval = sIStageAtTimeInterval()->create(stageWithHistoryId, beginTime, endTime, includeEndTime);
}
inline ValidMirrors StageAtTimeInterval::getAttributeValidBits(const PathC& path, const TokenC& attrName) const
{
return sIStageAtTimeInterval()->getAttributeValidBits(m_stageAtTimeInterval, path, attrName);
}
template <typename T>
std::vector<const T*> StageAtTimeInterval::getAttributeRd(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<const T*> retval(count);
const void** retvalData = reinterpret_cast<const void**>(retval.data());
size_t bytesPerAttr =
sIStageAtTimeInterval()->getAttributeRd(retvalData, count, m_stageAtTimeInterval, path, attrName);
if (sizeof(T) == bytesPerAttr)
{
return retval;
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), bytesPerAttr);
return std::vector<const T*>();
}
}
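// Note the per-timesample convention used throughout StageAtTimeInterval:
// accessors return one entry per sample in the queried interval (that is,
// getTimeSampleCount() entries), aligned with the times from getTimestamps().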
template <typename T>
std::vector<const T*> StageAtTimeInterval::getAttributeRdGpu(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<const T*> retval(count);
std::vector<ConstSpanC> arrays(count);
sIStageAtTimeInterval()->getAttributeRdGpu(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
for (size_t i = 0; i != count; i++)
{
if (arrays[i].elementSize == sizeof(T))
{
retval[i] = reinterpret_cast<const T*>(arrays[i].ptr);
}
else
{
retval[i] = nullptr;
}
}
return retval;
}
inline std::vector<size_t> StageAtTimeInterval::getArrayAttributeSize(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<size_t> sizes(count);
sIStageAtTimeInterval()->getArrayAttributeSize(sizes.data(), count, m_stageAtTimeInterval, path, attrName);
return sizes;
}
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getArrayAttributeRd(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanWithTypeC> arrays(count);
std::vector<gsl::span<const T>> retval(count);
sIStageAtTimeInterval()->getArrayAttributeWithSizeRd(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
for (size_t i = 0; i != count; i++)
{
if (arrays[i].elementSize != sizeof(T))
{
            retval[i] = gsl::span<const T>();
continue;
}
const T* ptr = reinterpret_cast<const T*>(arrays[i].ptr);
retval[i] = gsl::span<const T>(ptr, arrays[i].elementCount);
}
return retval;
}
inline std::vector<ConstArrayAsBytes> StageAtTimeInterval::getArrayAttributeRawRd(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanWithTypeC> arrays(count);
std::vector<ConstArrayAsBytes> retval(count);
sIStageAtTimeInterval()->getArrayAttributeWithSizeRd(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
for (size_t i = 0; i != count; i++)
{
const gsl::byte* ptr = reinterpret_cast<const gsl::byte*>(arrays[i].ptr);
retval[i].arrayBytes = gsl::span<const gsl::byte>(ptr, arrays[i].elementCount * arrays[i].elementSize);
retval[i].bytesPerElement = arrays[i].elementSize;
retval[i].elementType = Type(arrays[i].type);
}
return retval;
}
inline std::vector<RationalTime> StageAtTimeInterval::getTimestamps() const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<RationalTime> retval(count);
sIStageAtTimeInterval()->getTimestamps(retval.data(), count, m_stageAtTimeInterval);
return retval;
}
inline size_t StageAtTimeInterval::getTimeSampleCount() const
{
return sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
}
inline PrimBucketList StageAtTimeInterval::findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none)
{
PrimBucketListId primBucketListId = sIStageAtTimeInterval()->findPrims(m_stageAtTimeInterval, all, any, none);
return { primBucketListId };
}
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanC> outC(count);
ConstSpanC* outCData = outC.data();
sIStageAtTimeInterval()->getAttributeArrayRd(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<gsl::span<const T>> retval(count);
size_t i = 0;
for (ConstSpanC array : outC)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
retval[i] = gsl::span<const T>(typedElementsPtr, array.elementCount);
i++;
}
return retval;
}
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanC> outC(count);
ConstSpanC* outCData = outC.data();
sIStageAtTimeInterval()->getAttributeArrayRdGpu(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<gsl::span<const T>> retval(count);
size_t i = 0;
for (ConstSpanC array : outC)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
retval[i] = gsl::span<const T>(typedElementsPtr, array.elementCount);
i++;
}
return retval;
}
template <typename T>
std::vector<std::vector<gsl::span<const T>>> StageAtTimeInterval::getArrayAttributeArrayRd(
const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstArrayPointersAndSizesC> outC(count);
ConstArrayPointersAndSizesC* outCData = outC.data();
sIStageAtTimeInterval()->getArrayAttributeArrayWithSizesRd(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<std::vector<gsl::span<const T>>> retval(count);
size_t i = 0;
for (ConstArrayPointersAndSizesC pointersAndSizes : outC)
{
size_t primCount = pointersAndSizes.elementCount;
retval[i].resize(primCount);
for (size_t j = 0; j != primCount; j++)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(pointersAndSizes.arrayPtrs[j]);
retval[i][j] = { typedElementsPtr, pointersAndSizes.sizes[j] };
}
i++;
}
return retval;
}
inline std::vector<gsl::span<const char>> StageAtTimeInterval::getAttributeArrayRawRd(
const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanC> outC(count);
ConstSpanC* outCData = outC.data();
sIStageAtTimeInterval()->getAttributeArrayRd(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<gsl::span<const char>> retval(count);
size_t i = 0;
for (ConstSpanC array : outC)
{
const char* typedElementsPtr = reinterpret_cast<const char*>(array.ptr);
retval[i] = gsl::span<const char>(typedElementsPtr, array.elementCount * array.elementSize);
i++;
}
return retval;
}
inline std::vector<gsl::span<const Path>> StageAtTimeInterval::getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstPathCSpan> outC(count);
ConstPathCSpan* outCData = outC.data();
sIStageAtTimeInterval()->getPathArray(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex);
std::vector<gsl::span<const Path>> retval(count);
size_t i = 0;
for (ConstPathCSpan arrayC : outC)
{
const Path* array = reinterpret_cast<const Path*>(arrayC.ptr);
retval[i] = gsl::span<const Path>(array, arrayC.elementCount);
i++;
}
return retval;
}
inline std::vector<const Connection*> StageAtTimeInterval::getConnectionRd(const Path& path, const Token& connectionName)
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<const Connection*> retval(count);
const void** retvalData = reinterpret_cast<const void**>(retval.data());
sIStageAtTimeInterval()->getConnectionRd(retvalData, count, m_stageAtTimeInterval, path, connectionName);
return retval;
}
inline void StageAtTimeInterval::printBucketNames() const
{
sIStageAtTimeInterval()->printBucketNames(m_stageAtTimeInterval);
}
inline std::vector<size_t> StageAtTimeInterval::getAttributeCounts(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
std::vector<size_t> counts;
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
counts.resize(count);
sIStageAtTimeInterval()->getAttributeCounts(
m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, count, counts.data());
return counts;
}
inline std::pair<std::vector<std::vector<Token>>, std::vector<std::vector<Type>>> StageAtTimeInterval::getAttributeNamesAndTypes(
const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
std::vector<std::vector<Token>> outNames;
std::vector<std::vector<Type>> outTypes;
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<size_t> outSizes;
outSizes.resize(count);
sIStageAtTimeInterval()->getAttributeCounts(
m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, count, outSizes.data());
outNames.resize(count);
outTypes.resize(count);
// Make array of pointers to inner arrays to allow us to call
// getAttributeNamesAndTypes, which takes a C-style 2D array
// not a std::vector<std::vector>.
// Also set size of inner arrays
std::vector<Token*> outNamesPtrs(count);
std::vector<Type*> outTypesPtrs(count);
for (size_t i = 0; i < count; ++i)
{
outNames[i].resize(outSizes[i]);
outTypes[i].resize(outSizes[i]);
outNamesPtrs[i] = outNames[i].data();
outTypesPtrs[i] = outTypes[i].data();
}
sIStageAtTimeInterval()->getAttributeNamesAndTypes(m_stageAtTimeInterval,
primBucketList.m_primBucketListId,
primBucketListIndex,
count,
outSizes.data(),
outNamesPtrs.data(),
outTypesPtrs.data());
return { outNames, outTypes };
}
inline StageAtTimeInterval::~StageAtTimeInterval()
{
sIStageAtTimeInterval()->destroy(m_stageAtTimeInterval);
}
inline void StageAtTimeInterval::exportUsd(UsdStageId usdStageId) const
{
auto iStageAtTimeInterval = carb::getCachedInterface<carb::flatcache::IStageAtTimeInterval>();
iStageAtTimeInterval->exportUsd(m_stageAtTimeInterval, usdStageId);
}
/**
* @brief Linear interpolation for carb types Double3, Float3, Float4 (color)
* See InterpolationUsd.h for extended type support
*
* @details This is intended to be used internally by StageAtTime read methods in order
* to calculate values that were not written by StageInProgress directly.
*
* Enables the decoupling of the sim and render threads by allowing them access
* to ringbuffer values at various frequencies.
*/
template <typename T>
const T interpolate(const T& a, const T& b, float theta)
{
    // Note: std::lerp would do this, but it is C++20-only
    T result = T(a * (1.0f - theta)) + T(b * theta);
    return result;
}
template <>
inline const carb::Double3 interpolate(const carb::Double3& a, const carb::Double3& b, float theta)
{
if (theta < 0.0 || theta > 1.0)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
carb::Double3 result;
double tmp = 1.0 - theta;
result.x = (a.x * tmp) + (b.x * theta);
result.y = (a.y * tmp) + (b.y * theta);
result.z = (a.z * tmp) + (b.z * theta);
return result;
}
template <>
inline const carb::Float3 interpolate(const carb::Float3& a, const carb::Float3& b, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
carb::Float3 result;
float tmp = 1.0f - theta;
result.x = (a.x * tmp) + (b.x * theta);
result.y = (a.y * tmp) + (b.y * theta);
result.z = (a.z * tmp) + (b.z * theta);
return result;
}
template <>
inline const carb::Float4 interpolate(const carb::Float4& a, const carb::Float4& b, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
carb::Float4 result;
float tmp = 1.0f - theta;
result.x = (a.x * tmp) + (b.x * theta);
result.y = (a.y * tmp) + (b.y * theta);
result.z = (a.z * tmp) + (b.z * theta);
result.w = (a.w * tmp) + (b.w * theta);
return result;
}
template <>
inline const carb::flatcache::Token interpolate(const carb::flatcache::Token& a, const carb::flatcache::Token& b, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
return theta < 0.5f ? a : b;
}
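// Example (illustrative sketch, not part of the API): blending two sampled
// positions with the specialization above; theta = 0 returns a, theta = 1
// returns b.
inline carb::Float3 exampleInterpolateMidpoint(const carb::Float3& a, const carb::Float3& b)
{
    return interpolate(a, b, 0.5f); // halfway between the two samples
}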
// Auxiliary function used when handling data that is not going to be interpolated (bool, string, int, uint)
// Returns a pair of values from the first and second sampled frames, or the value found and nullopt if data is only
// available in one frame
template <typename T>
inline optional<std::pair<optional<T>,optional<T>>> StageAtTime::getNonInterpolatableAttributeRd(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getAttributeRd<T>(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
if (rawSamples.size() != sampleTimes.size())
{
return carb::cpp17::nullopt;
}
// checking that if the rawSamples are not empty, we have something valid in rawSamples[0]
CARB_ASSERT(rawSamples.empty() || rawSamples[0]);
// Communicate zero samples found
if ( rawSamples.empty() || !rawSamples[0] )
{
return carb::cpp17::nullopt;
}
if (rawSamples.size() == 1)
{
std::pair<carb::cpp17::optional<T>, carb::cpp17::optional<T>> result(*rawSamples[0], carb::cpp17::nullopt);
return result;
}
else if ( (rawSamples.size() == 2) && rawSamples[1] )
{
std::pair<carb::cpp17::optional<T>, carb::cpp17::optional<T>> result(*rawSamples[0], *rawSamples[1]);
return result;
}
return carb::cpp17::nullopt;
}
inline uint64_t StageAtTimeInterval::writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const
{
return sIStageAtTimeInterval()->writeCacheToDisk(m_stageAtTimeInterval, file, workingBuffer, workingBufferSize);
}
inline void StageAtTimeInterval::addRefCount()
{
return sIStageAtTimeInterval()->addRefCount(m_stageAtTimeInterval);
}
inline bool StageAtTimeInterval::removeRefCount()
{
return sIStageAtTimeInterval()->removeRefCount(m_stageAtTimeInterval);
}
inline unsigned int StageAtTimeInterval::getRefCount()
{
return sIStageAtTimeInterval()->getRefCount(m_stageAtTimeInterval);
}
// StageAtTime implementation starts here
// This is defined here rather than in Carbonite plugin to allow use of templates and inlining
inline ValidMirrors StageAtTime::getAttributeValidBits(const PathC& path, const TokenC& attrName) const
{
return m_historyWindow.getAttributeValidBits(path, attrName);
}
// The method reads interpolatable data types, and is specialized as optional<pair<optional<T>, optional<T>>>
// in order to report non-interpolatable data types as found in either or both samples
template <typename T>
inline optional<T> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getAttributeRd<T>(path, attrName);
// Communicate zero samples found
if (rawSamples.size() == 0)
{
return carb::cpp17::nullopt;
}
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
CARB_ASSERT(rawSamples[0]);
return *rawSamples[0];
}
else if (rawSamples.size() == 2)
{
CARB_ASSERT(rawSamples[0]);
CARB_ASSERT(rawSamples[1]);
// Calculate linear approximation of f(time)
T a_f = *rawSamples[0];
T b_f = *rawSamples[1];
return interpolate(a_f, b_f, (float)m_theta);
}
return carb::cpp17::nullopt;
}
// The following specializations are deleted because the specified types cannot be interpolated.
// StageAtTime reports the non-interpolatable types read from Flatcache as a pair<optional<T>, optional<T>>
template <>
inline optional<bool> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<int> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<unsigned int> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<unsigned char> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<int64_t> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<uint64_t> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<carb::flatcache::Token> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
// Specialize StageAtTime::getAttributeRd for non-interpolatable types: bool, int, uint
// In these cases the returned type will be a pair of values from the samples found, or nullopt otherwise
template <>
inline optional<std::pair<optional<bool>, optional<bool>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
auto result = getNonInterpolatableAttributeRd<bool>(path, attrName);
return result;
}
template <>
inline optional<std::pair<optional<int>, optional<int>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<int>(path, attrName);
}
template <>
inline optional<std::pair<optional<unsigned int>, optional<unsigned int>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<unsigned int>(path, attrName);
}
template <>
inline optional<std::pair<optional<unsigned char>, optional<unsigned char>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<unsigned char>(path, attrName);
}
template <>
inline optional<std::pair<optional<int64_t>, optional<int64_t>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<int64_t>(path, attrName);
}
template <>
inline optional<std::pair<optional<uint64_t>, optional<uint64_t>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<uint64_t>(path, attrName);
}
template <>
inline optional<std::pair<optional<carb::flatcache::Token>, optional<carb::flatcache::Token>>> StageAtTime::getAttributeRd(
const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<carb::flatcache::Token>(path, attrName);
}
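// Example (illustrative sketch, not part of the API): reading a
// non-interpolatable bool attribute. The result carries the value at each of
// the two sampled frames; the second is nullopt if only one frame holds the
// attribute. `stageAtTime`, `path` and `attrName` are assumed inputs.
inline void exampleReadBoolAttribute(const StageAtTime& stageAtTime, const Path& path, const Token& attrName)
{
    auto samples = stageAtTime.getAttributeRd<std::pair<optional<bool>, optional<bool>>>(path, attrName);
    if (samples && samples->first)
    {
        bool valueAtFirstSample = *samples->first;
        CARB_UNUSED(valueAtFirstSample);
    }
}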
template <typename T>
const T* StageAtTime::getAttributeRdGpu(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getAttributeRdGpu<T>(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
CARB_ASSERT(rawSamples.size() == sampleTimes.size());
// This API doesn't have a way to communicate zero samples found
CARB_ASSERT(rawSamples.size() != 0);
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
CARB_ASSERT(rawSamples[0]);
return rawSamples[0];
}
else if (rawSamples.size() == 2)
{
        // Interpolation is not supported for GPU attributes yet
        // Return first sample value instead for now
        CARB_LOG_WARN_ONCE("Interpolation of GPU attributes is not supported yet, returning first time sample instead!");
CARB_ASSERT(rawSamples[0]);
return rawSamples[0];
}
return nullptr;
}
inline size_t StageAtTime::getArrayAttributeSize(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getArrayAttributeSize(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
CARB_ASSERT(rawSamples.size() == sampleTimes.size());
// This API doesn't have a way to communicate zero samples found
CARB_ASSERT(rawSamples.size() != 0);
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
return rawSamples[0];
}
else if (rawSamples.size() == 2)
{
        // Interpolation is not supported for array attributes yet
        // Return the first sample's size instead for now
return rawSamples[0];
}
return 0;
}
template <typename T>
inline gsl::span<const T> StageAtTime::getArrayAttributeRd(const Path& path, const Token& attrName)
{
auto rawSamples = m_historyWindow.getArrayAttributeRd<T>(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
CARB_ASSERT(rawSamples.size() == sampleTimes.size());
// This API doesn't have a way to communicate zero samples found
CARB_ASSERT(rawSamples.size() != 0);
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
return rawSamples[0];
}
else if (rawSamples.size() == 2)
{
        // Interpolation is not supported for array attributes yet
        // Return first sample value instead for now
        CARB_LOG_WARN_ONCE("Interpolation of array attributes is not supported yet, returning first time sample instead!");
return rawSamples[0];
}
return gsl::span<const T>();
}
/**
* @brief Auxiliary function used by AttributeArrayResult<T> and AttributeArrayResult<std::vector<T>>
*
* @details Used to assess if a prim is present in both of the sampled frames
*/
inline bool checkPathCorrespondence(std::vector<gsl::span<const carb::flatcache::Path>> paths, size_t index, size_t& pos_f0, size_t& pos_f1)
{
if (paths.size() > 1)
{
// in the common case, the prim exists in both frames
if ((index < paths[1].size()) && (paths[0][index] == paths[1][index]))
{
pos_f0 = pos_f1 = index;
return true;
}
auto pathIt = std::find(paths[1].begin(), paths[1].end(), paths[0][index]);
if (pathIt != paths[1].end())
{
pos_f0 = index; // TODO: this isn't needed, can infer it
pos_f1 = std::distance(paths[1].begin(), pathIt);
return true;
}
}
return false;
}
/**
* @brief Returned by StageAtTime.getAttributeArrayRd
*
* @details Holds at most two samples (one from frame n, and one from frame n+1)
* checkPathCorrespondence verifies if the path in frame n exists in frame n+1
* If no corresponding path exists, the value will be returned and not interpolated
*/
template <typename T>
class AttributeArrayResult
{
public:
    size_t size() const
    {
        return m_samples.empty() ? 0 : m_samples[0].size();
    }
bool empty() const
{
return (size() == 0);
}
std::vector<gsl::span<const T>> const* data() const
{
return &m_samples;
}
std::vector<gsl::span<const T>>* data()
{
return &m_samples;
}
    T operator[](const size_t valueIndex) const
    {
        if (m_samples.empty() || m_samples[0].empty() || valueIndex >= m_samples[0].size())
        {
            CARB_LOG_WARN_ONCE("AttributeArrayResult[] out of bounds");
            return T();
        }
        if (m_samples.size() == 1)
        {
            return m_samples[0][valueIndex];
        }
        else if (m_samples.size() == 2)
        {
            size_t pos0, pos1;
            if (checkPathCorrespondence(m_paths, valueIndex, pos0, pos1))
            {
                T a = m_samples[0][pos0];
                T b = m_samples[1][pos1];
                return interpolate<T>(a, b, m_theta);
            }
            return m_samples[0][valueIndex];
        }
        return T();
    }
std::vector<gsl::span<const carb::flatcache::Path>> m_paths;
std::vector<gsl::span<const T>> m_samples;
float m_theta;
};
/**
* @brief Returned by StageAtTime.getArrayAttributeArrayRd
*
* @details Enables access to a vector of readily interpolated attribute values
*/
template <typename T>
class AttributeArrayResult<std::vector<T>>
{
public:
    size_t size() const
    {
        return m_samples.empty() ? 0 : m_samples[0].size();
    }
bool empty() const
{
return (size() == 0);
}
    std::vector<std::vector<gsl::span<const T>>> const* data() const
    {
        return &m_samples;
    }
    std::vector<std::vector<gsl::span<const T>>>* data()
    {
        return &m_samples;
    }
    std::vector<T> operator[](const size_t primIndex)
    {
        std::vector<T> interpolatedAttributeValues;
        if (m_samples.empty() || primIndex >= m_samples[0].size())
        {
            CARB_LOG_WARN_ONCE("AttributeArrayResult[] out of bounds");
            return interpolatedAttributeValues;
        }
if (m_samples.size() == 1)
{
interpolatedAttributeValues.resize(m_samples[0][primIndex].size());
std::copy(m_samples[0][primIndex].begin(), m_samples[0][primIndex].end(), interpolatedAttributeValues.begin());
return interpolatedAttributeValues;
}
else if (m_samples.size() == 2)
{
size_t pos0, pos1;
if (checkPathCorrespondence(m_paths, primIndex, pos0, pos1))
{
                auto values_f0 = m_samples[0][pos0];
                auto values_f1 = m_samples[1][pos1];
interpolatedAttributeValues.reserve(values_f0.size());
// interpolate attrib values for the requested {prim index : attrib val index}
for (size_t valueIndex = 0; valueIndex < values_f0.size(); ++valueIndex)
{
T a = (values_f0[valueIndex]);
T b = (values_f1[valueIndex]);
T result = interpolate<T>(a, b, m_theta);
interpolatedAttributeValues.emplace_back(result);
}
return interpolatedAttributeValues;
}
interpolatedAttributeValues.resize(m_samples[0][primIndex].size());
std::copy(m_samples[0][primIndex].begin(), m_samples[0][primIndex].end(), interpolatedAttributeValues.begin());
return interpolatedAttributeValues;
}
return std::vector<T>();
}
std::vector<gsl::span<const carb::flatcache::Path>> m_paths;
std::vector<std::vector<gsl::span<const T>>> m_samples;
float m_theta;
};
template <typename T>
AttributeArrayResult<T> StageAtTime::getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount > 0)
{
AttributeArrayResult<T> arrAttRes;
arrAttRes.m_samples = m_historyWindow.getAttributeArrayRd<T>(primBucketList, primBucketListIndex, attrName);
arrAttRes.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
arrAttRes.m_theta = (float)m_theta;
return arrAttRes;
}
else
{
CARB_LOG_WARN_ONCE(
"getAttributeArrayRd %s: Data not available at time, possible dropped frame", attrName.getText());
return AttributeArrayResult<T>();
}
}
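// Example (illustrative sketch, not part of the API): reading a float
// attribute for every prim in the first bucket; operator[] interpolates
// between the two sampled frames when both are present.
inline void exampleAttributeArrayRead(const StageAtTime& stageAtTime,
                                      const PrimBucketList& buckets,
                                      const Token& attrName)
{
    AttributeArrayResult<float> values = stageAtTime.getAttributeArrayRd<float>(buckets, 0, attrName);
    for (size_t i = 0; i < values.size(); ++i)
    {
        float interpolated = values[i];
        CARB_UNUSED(interpolated);
    }
}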
template <typename T>
AttributeArrayResult<T> StageAtTime::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount > 0)
{
AttributeArrayResult<T> arrAttRes;
arrAttRes.m_samples = m_historyWindow.getAttributeArrayRdGpu<T>(primBucketList, primBucketListIndex, attrName);
arrAttRes.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
arrAttRes.m_theta = (float)m_theta;
return arrAttRes;
}
else
{
CARB_LOG_WARN_ONCE(
"getAttributeArrayRdGpu %s: Data not available at time, possible dropped frame", attrName.getText());
return AttributeArrayResult<T>();
}
}
inline std::vector<gsl::span<const char>> StageAtTime::getAttributeArrayRawRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
return m_historyWindow.getAttributeArrayRawRd(primBucketList, primBucketListIndex, attrName);
}
template <typename T>
AttributeArrayResult<std::vector<T>> StageAtTime::getArrayAttributeArrayRd(
const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
AttributeArrayResult<std::vector<T>> result;
if (sampleCount > 0)
{
result.m_samples = m_historyWindow.getArrayAttributeArrayRd<T>(primBucketList, primBucketListIndex, attrName);
result.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
result.m_theta = (float)m_theta;
return result;
}
else
{
        CARB_LOG_WARN_ONCE(
            "getArrayAttributeArrayRd %s: Data not available at time, possible dropped frame", attrName.getText());
return AttributeArrayResult<std::vector<T>>();
}
}
inline gsl::span<const Path> StageAtTime::getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount == 1)
{
return m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
}
else if (sampleCount == 0)
{
CARB_LOG_WARN_ONCE("getPathArray: Data not available at time, possible dropped frame");
return gsl::span<const Path>();
}
else if (sampleCount == 2)
{
// TODO: make this correct when prims are being added and deleted
// To do this we need to make a new array out:
// out[i] = in0[i] , if in0[i] == in1[i]
// = kUninitializedPath, otherwise
return m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
#if 0
gsl::span<const Path> in0 = m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
gsl::span<const Path> in1 = m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[1];
std::vector<Path> multiframePaths;
for (size_t i = 0; i < in0.size(); ++i)
in0[i] == in1[i] ? multiframePaths.emplace_back(in0[i]) : multiframePaths.emplace_back(flatcache::kUninitializedPath);
return multiframePaths;
#endif
}
return gsl::span<const Path>();
}
inline std::vector<const Connection*> StageAtTime::getConnectionRd(const Path& path, const Token& connectionName)
{
return m_historyWindow.getConnectionRd(path, connectionName);
}
inline void StageAtTime::printBucketNames() const
{
m_historyWindow.printBucketNames();
}
inline size_t StageAtTime::getAttributeCount(const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
std::vector<size_t> counts = m_historyWindow.getAttributeCounts(primBucketList, primBucketListIndex);
if (counts.size() == 1)
{
return counts[0];
}
    // Perform a set intersection to get a valid count
if (counts.size() == 2)
{
//
// TODO: The attributes are internally sorted vectors, see flatcache::set.
// Ideally we'd make a C-ABI type that makes it clear that these are sorted,
// wrap with flatcache::set in the C++ wrapper and then use the standard library set intersection.
//
auto namesAndTypes = m_historyWindow.getAttributeNamesAndTypes(primBucketList, primBucketListIndex);
const std::vector<std::vector<Token>>& names = namesAndTypes.first;
const std::vector<std::vector<Type>>& types = namesAndTypes.second;
std::vector<Token> intersection;
// Perform a set intersection but we need to track the types as we intersect
const std::vector<Token>& workingNames = names[0].size() < names[1].size() ? names[0] : names[1];
const std::vector<Type>& workingTypes = names[0].size() < names[1].size() ? types[0] : types[1];
const std::vector<Token>& testingNames = names[0].size() < names[1].size() ? names[1] : names[0];
const std::vector<Type>& testingTypes = names[0].size() < names[1].size() ? types[1] : types[0];
// Since attribute vectors are sorted we can track last spotted locations to be more efficient.
size_t last = 0;
for (size_t i = 0; i < workingNames.size(); ++i)
{
for (size_t j = last; j < testingNames.size(); ++j)
{
if (workingNames[i] == testingNames[j])
{
if (workingTypes[i] == testingTypes[j])
{
intersection.push_back(workingNames[i]);
}
// Store hit location to start next search
last = j;
break;
}
}
}
return intersection.size();
}
return 0;
}
inline std::pair<std::vector<Token>, std::vector<Type>> StageAtTime::getAttributeNamesAndTypes(
const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
std::vector<Token> outNames;
std::vector<Type> outTypes;
std::vector<std::vector<Token>> names;
std::vector<std::vector<Type>> types;
std::tie(names, types) = m_historyWindow.getAttributeNamesAndTypes(primBucketList, primBucketListIndex);
if (names.size() == 1)
{
outNames = std::move(names[0]);
outTypes = std::move(types[0]);
}
if (names.size() == 2)
{
        // Assuming the invariant holds that names and types in the same slot have the same count.
outNames.reserve(std::min(names[0].size(), names[1].size()));
outTypes.reserve(std::min(types[0].size(), types[1].size()));
// Perform a set intersection but we need to track the types as we intersect
std::vector<Token>& workingNames = names[0].size() < names[1].size() ? names[0] : names[1];
std::vector<Type>& workingTypes = names[0].size() < names[1].size() ? types[0] : types[1];
std::vector<Token>& testingNames = names[0].size() < names[1].size() ? names[1] : names[0];
std::vector<Type>& testingTypes = names[0].size() < names[1].size() ? types[1] : types[0];
// Since attribute vectors are sorted we can track last spotted locations to be more efficient.
size_t last = 0;
for (size_t i = 0; i < workingNames.size(); ++i)
{
for (size_t j = last; j < testingNames.size(); ++j)
{
if (workingNames[i] == testingNames[j])
{
if (workingTypes[i] == testingTypes[j])
{
outNames.push_back(workingNames[i]);
outTypes.push_back(workingTypes[i]);
}
// Store hit location to start next search
last = j;
break;
}
}
}
}
return { outNames, outTypes };
}
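// Example (illustrative sketch, not part of the API): listing the attribute
// names common to both sampled frames for the first bucket of `buckets`.
inline void exampleListAttributes(const StageAtTime& stageAtTime, const PrimBucketList& buckets)
{
    std::vector<Token> names;
    std::vector<Type> types;
    std::tie(names, types) = stageAtTime.getAttributeNamesAndTypes(buckets, 0);
    for (const Token& name : names)
    {
        CARB_LOG_INFO("attribute: %s", name.getText());
    }
}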
inline uint64_t StageAtTime::writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount != 1)
{
        CARB_LOG_ERROR_ONCE("Can't call StageAtTime::writeCacheToDisk for interpolated values");
return 0;
}
return m_historyWindow.writeCacheToDisk(file, workingBuffer, workingBufferSize);
}
inline void StageAtTime::addRefCount()
{
m_historyWindow.addRefCount();
}
inline bool StageAtTime::removeRefCount()
{
return m_historyWindow.removeRefCount();
}
inline unsigned int StageAtTime::getRefCount()
{
return m_historyWindow.getRefCount();
}
// StageWithHistory implementation starts here
inline StageWithHistory::StageWithHistory(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda)
{
auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
m_stageWithHistory = iStageWithHistory->create2(usdStageId, historyFrameCount, simPeriod, withCuda);
m_usdStageId = usdStageId;
}
inline StageWithHistory::~StageWithHistory()
{
auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
iStageWithHistory->destroy(m_usdStageId);
}
inline ListenerId StageWithHistory::createListener()
{
    auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
    ListenerId newId = iStageWithHistory->createListener();
return newId;
}
// Templated methods do not get compiled unless they are instantiated.
// The following code is not intended to be executed, it just instantiates each
// templated method once to make sure that they compile.
inline void instantiationTest(StageInProgress& stage,
StageAtTimeInterval& stageAtInterval,
StageAtTime& stageAtTime,
const Path& path,
const Token& attrName)
{
int* x0 = stage.getAttribute<int>(path, attrName);
CARB_UNUSED(x0);
const int* x1 = stage.getAttributeRd<int>(path, attrName);
CARB_UNUSED(x1);
int* x2 = stage.getAttributeWr<int>(path, attrName);
CARB_UNUSED(x2);
gsl::span<int> x3 = stage.getArrayAttribute<int>(path, attrName);
CARB_UNUSED(x3);
gsl::span<const int> x4 = stage.getArrayAttributeRd<int>(path, attrName);
CARB_UNUSED(x4);
gsl::span<int> x5 = stage.getArrayAttributeWr<int>(path, attrName);
CARB_UNUSED(x5);
PrimBucketList pbl = stage.findPrims({}, {}, {});
gsl::span<int> x6 = stage.getAttributeArray<int>(pbl, 0, attrName);
CARB_UNUSED(x6);
std::vector<const int*> x7 = stageAtInterval.getAttributeRd<int>(path, attrName);
CARB_UNUSED(x7);
std::vector<gsl::span<const int>> x8 = stageAtInterval.getAttributeArrayRd<int>(pbl, 0, attrName);
CARB_UNUSED(x8);
optional<float> x9 = stageAtTime.getAttributeRd<float>(path, attrName);
CARB_UNUSED(x9);
    optional<std::pair<optional<int>, optional<int>>> x10 = stageAtTime.getAttributeRd<std::pair<optional<int>, optional<int>>>(path, attrName);
CARB_UNUSED(x10);
carb::flatcache::AttributeArrayResult<int> x11 = stageAtTime.getAttributeArrayRd<int>(pbl, 0, attrName);
CARB_UNUSED(x11);
carb::flatcache::AttributeArrayResult<std::vector<int>> x12 = stageAtTime.getArrayAttributeArrayRd<int>(pbl, 0, attrName);
CARB_UNUSED(x12);
}
} // namespace flatcache
} // namespace carb
| 74,405 |
C
| 37.176501 | 153 | 0.672603 |
omniverse-code/kit/fabric/include/carb/flatcache/IPath.h
|
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Framework.h>
#include <carb/Interface.h>
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/Intrinsics.h>
#include <functional>
// Set to empty macro when IPath::iPath static member is removed
#define FLATCACHE_IPATH_INIT \
const carb::flatcache::IPath* carb::flatcache::Path::iPath = nullptr;
namespace carb
{
namespace flatcache
{
// PathC are integer keys that identify paths to C-ABI interfaces
struct PathC
{
uint64_t path;
constexpr bool operator<(const PathC& other) const
{
return path < other.path;
}
constexpr bool operator==(const PathC& other) const
{
return path == other.path;
}
constexpr bool operator!=(const PathC& other) const
{
return path != other.path;
}
};
static_assert(std::is_standard_layout<PathC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");
// We don't reference count the uninitialized (or empty) path, and we use
// this fact to avoid unnecessary dll calls to addRef()/removeRef(), for
// example during std::vector resize. To do this we need to check whether a
// path is uninitialized without the dll call getEmptyPath(), so we store
// its value here in a constant.
// We run automated test "IPath::getEmptyPath() dll call can be replaced with
// constant, Path::kUninitializedPath" to ensure that this constant never
// changes.
static constexpr PathC kUninitializedPath{0};
// C-ABI interface to pxr::SdfPath
struct IPath
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IPath", 0, 1);
PathC (*getHandle)(const char* name);
const char* (*getText)(PathC handle);
PathC (*getParent)(PathC handle);
PathC (*appendChild)(PathC handle, TokenC childName);
void (*addRef)(PathC handle);
void (*removeRef)(PathC handle);
PathC (*getEmptyPath)();
// Creates a path by appending a given relative path to this path.
PathC (*appendPath)(PathC handle, PathC path);
// Returns the number of path elements in this path.
uint32_t (*getPathElementCount)(PathC handle);
};
// C++ wrapper for IPath
class Path
{
static carb::flatcache::IPath& sIPath();
public:
// DEPRECATED: keeping for binary compatibility
// Will be removed in October 2021 - @TODO set FLATCACHE_IPATH_INIT to empty macro when removed!
// Still safe to use if initialized in a given dll
static const carb::flatcache::IPath* iPath;
Path() : mHandle(kUninitializedPath)
{
}
Path(const char* path)
{
mHandle = sIPath().getHandle(path);
}
// Needs to be noexcept for std::vector::resize() to move instead of copy
~Path() noexcept
{
// We see the compiler construct and destruct many uninitialized
// temporaries, for example when resizing std::vector.
// We don't want to do an IPath dll call for these, so skip if handle
// is uninitialized.
if (mHandle != kUninitializedPath)
{
sIPath().removeRef(mHandle);
}
}
// Copy constructor
Path(const Path& other) : mHandle(other.mHandle)
{
if (mHandle != kUninitializedPath)
{
sIPath().addRef(mHandle);
}
}
// Copy construct from integer
Path(PathC handle) : mHandle(handle)
{
if (mHandle != kUninitializedPath)
{
sIPath().addRef(mHandle);
}
}
// Move constructor
// Needs to be noexcept for std::vector::resize() to move instead of copy
Path(Path&& other) noexcept
{
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
// Make source invalid
other.mHandle = kUninitializedPath;
}
// Copy assignment
Path& operator=(const Path& other)
{
if (this != &other)
{
if (mHandle != kUninitializedPath)
{
sIPath().removeRef(mHandle);
}
if (other.mHandle != kUninitializedPath)
{
sIPath().addRef(other.mHandle);
}
}
mHandle = other.mHandle;
return *this;
}
// Move assignment
Path& operator=(Path&& other) noexcept
{
if (&other == this)
return *this;
// We are about to overwrite the dest handle, so decrease its refcount
if (mHandle != kUninitializedPath)
{
sIPath().removeRef(mHandle);
}
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
other.mHandle = kUninitializedPath;
return *this;
}
const char* getText() const
{
return sIPath().getText(mHandle);
}
constexpr bool operator<(const Path& other) const
{
return mHandle < other.mHandle;
}
constexpr bool operator!=(const Path& other) const
{
return mHandle != other.mHandle;
}
constexpr bool operator==(const Path& other) const
{
return mHandle == other.mHandle;
}
constexpr operator PathC() const
{
return mHandle;
}
private:
PathC mHandle;
};
static_assert(std::is_standard_layout<Path>::value, "Path must be standard layout as it is used in C-ABI interfaces");
#ifndef __CUDACC__
inline carb::flatcache::IPath& Path::sIPath()
{
// Acquire carbonite interface on first use
carb::flatcache::IPath* iPath = carb::getCachedInterface<carb::flatcache::IPath>();
CARB_ASSERT(iPath);
return *iPath;
}
#endif // __CUDACC__
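#ifndef __CUDACC__
// Example (illustrative sketch, not part of the API): Path behaves as a
// reference-counted value type, so copies are cheap and safe to store in
// containers.
inline void examplePathUsage()
{
    Path parent("/World");
    Path copy = parent;          // addRef on the shared handle
    CARB_ASSERT(copy == parent); // both wrap the same SdfPath
    CARB_UNUSED(copy);
}
#endif // __CUDACC__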
}
}
namespace std
{
template <>
class hash<carb::flatcache::PathC>
{
public:
inline size_t operator()(const carb::flatcache::PathC& key) const
{
// lower 8 bits have no entropy, so just remove the useless bits
return key.path >> 8;
}
};
template <>
class hash<carb::flatcache::Path>
{
public:
inline size_t operator()(const carb::flatcache::Path& key) const
{
return std::hash<carb::flatcache::PathC>()(carb::flatcache::PathC(key));
}
};
}
| 6,493 |
C
| 26.171548 | 121 | 0.639458 |
omniverse-code/kit/fabric/include/carb/flatcache/FlatCacheUSD.h
|
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/IPath.h>
#include <carb/logging/Log.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/path.h>
namespace carb
{
namespace flatcache
{
// asInt() is the same as SdfPath::_AsInt()
// Flatcache relies on asInt(a)==asInt(b) <=> a is same path as b,
// which is how SdfPath::operator== is currently defined.
// If USD changes sizeof(pxr::SdfPath), we will need to change PathC to make it
// the same size.
inline PathC asInt(const pxr::SdfPath& path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
PathC ret;
std::memcpy(&ret, &path, sizeof(pxr::SdfPath));
return ret;
}
inline const PathC* asInt(const pxr::SdfPath* path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
return reinterpret_cast<const PathC*>(path);
}
inline TokenC asInt(const pxr::TfToken& token)
{
static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
TokenC ret;
std::memcpy(&ret, &token, sizeof(pxr::TfToken));
return ret;
}
inline const TokenC* asInt(const pxr::TfToken* token)
{
static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
return reinterpret_cast<const TokenC*>(token);
}
// Return reference to ensure that reference count doesn't change
inline const pxr::TfToken& intToToken(const TokenC& token)
{
static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
return reinterpret_cast<const pxr::TfToken&>(token);
}
inline const pxr::SdfPath& intToPath(const PathC& path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
return reinterpret_cast<const pxr::SdfPath&>(path);
}
inline const pxr::SdfPath* intToPath(const Path* path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(Path), "Change Path to make the same size as pxr::SdfPath");
return reinterpret_cast<const pxr::SdfPath*>(path);
}
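// Example (illustrative sketch, not part of the API): round-tripping between
// pxr::SdfPath and the integer handle used by the C-ABI.
inline void exampleConversionRoundTrip(const pxr::SdfPath& usdPath)
{
    PathC handle = asInt(usdPath);
    const pxr::SdfPath& samePath = intToPath(handle);
    CARB_ASSERT(samePath == usdPath);
    CARB_UNUSED(samePath);
}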
}
}
| 2,585 |
C
| 31.325 | 113 | 0.716828 |
omniverse-code/kit/fabric/include/carb/flatcache/PrimChanges.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IFlatcache.h>
#include <carb/flatcache/IPath.h>
#include <gsl/span>
#include <cstddef>
namespace carb
{
namespace flatcache
{
struct AttrAndChangedIndices
{
AttrNameAndType attr;
// For which prims did this attribute change?
bool allIndicesChanged;
gsl::span<const size_t> changedIndices;
};
struct BucketChanges
{
// For each attribute, which prims changed?
std::vector<AttrAndChangedIndices> attrChangedIndices;
gsl::span<const Path> pathArray;
BucketChanges() = default;
BucketChanges(BucketChangesC in) :
pathArray({ in.pathArray.ptr,in.pathArray.elementCount })
{
size_t count = in.changedIndices.elementCount;
attrChangedIndices.resize(count);
for (size_t i = 0; i != count; i++)
{
const ConstChangedIndicesC& inAttrChanges = in.changedIndices.ptr[i];
attrChangedIndices[i].attr = in.changedAttributes.ptr[i];
attrChangedIndices[i].allIndicesChanged = inAttrChanges.allIndicesChanged;
attrChangedIndices[i].changedIndices =
gsl::span<const size_t>(inAttrChanges.changedIndices.ptr, inAttrChanges.changedIndices.elementCount);
}
}
};
class AddedPrimIndices
{
// Which prims were added?
gsl::span<const size_t> addedIndices;
public:
AddedPrimIndices(AddedPrimIndicesC in)
{
addedIndices = gsl::span<const size_t>(in.addedIndices.ptr, in.addedIndices.elementCount);
}
size_t size() const
{
return addedIndices.size();
}
// This iterator first iterates over the deletedElements that were replaced
// by new elements, then the contiguous range of elements added at the end
// of the bucket
struct iterator
{
using iterator_category = std::input_iterator_tag;
using difference_type = size_t;
using value_type = size_t;
using reference = size_t;
iterator(
gsl::span<const size_t>::iterator _addedIndicesIterator,
gsl::span<const size_t>::iterator _addedIndicesEnd) :
addedIndicesIterator(_addedIndicesIterator),
addedIndicesEnd(_addedIndicesEnd)
{}
reference operator*() const
{
return *addedIndicesIterator;
}
iterator& operator++()
{
addedIndicesIterator++;
return *this;
}
bool operator==(iterator other) const
{
return addedIndicesIterator == other.addedIndicesIterator;
}
bool operator!=(iterator other) const { return !(*this == other); }
difference_type operator-(iterator other)
{
return addedIndicesIterator - other.addedIndicesIterator;
}
private:
gsl::span<const size_t>::iterator addedIndicesIterator;
gsl::span<const size_t>::iterator addedIndicesEnd;
};
iterator begin()
{
return iterator(addedIndices.begin(), addedIndices.end());
}
iterator end()
{
return iterator(addedIndices.end(), addedIndices.end());
}
};
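// Example (illustrative sketch, not part of the API): visiting the indices of
// prims added to a bucket; `added` would come from a change-tracking query.
inline void exampleVisitAddedPrims(AddedPrimIndices& added)
{
    for (size_t index : added)
    {
        CARB_UNUSED(index);
    }
}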
}
}
| 3,624 |
C
| 26.462121 | 117 | 0.656457 |
omniverse-code/kit/fabric/include/carb/flatcache/Intrinsics.h
|
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <cstdint>
#include <cstdlib>
#include <cstddef>
#include <carb/flatcache/Defines.h>
#if USING( WINDOWS_BUILD )
#include <intrin.h>
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
// no linux-specific includes at this time
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
namespace carb
{
namespace flatcache
{
inline uint32_t clz32( const uint32_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanReverse( &z, x ) ? 31 - z : 32;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_clz( x ) : 32;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint32_t clz64( const uint64_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanReverse64( &z, x ) ? 63 - z : 64;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_clzll( x ) : 64;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint32_t ctz32( const uint32_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanForward( &z, x ) ? z : 32;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_ctz( x ) : 32;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint32_t ctz64( const uint64_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanForward64( &z, x ) ? z : 64;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_ctzll( x ) : 64;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint64_t bswap64( const uint64_t x )
{
#if USING( WINDOWS_BUILD )
return _byteswap_uint64( x );
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return __builtin_bswap64 ( x );
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint64_t rotr64( const uint64_t value, const int shift )
{
#if USING( WINDOWS_BUILD )
return _rotr64( value, shift );
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return (value >> shift) | (value << (64 - shift));
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint64_t rotl64( const uint64_t value, const int shift )
{
#if USING( WINDOWS_BUILD )
return _rotl64( value, shift );
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return (value << shift) | (value >> (64 - shift));
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
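// Example (illustrative sketch, not part of the API): rounding up to the next
// power of two using the count-leading-zeros intrinsic above.
inline uint64_t examplePow2RoundUp( const uint64_t x )
{
    return x <= 1ull ? 1ull : 1ull << ( 64u - clz64( x - 1ull ) );
}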
} // namespace flatcache
} // namespace carb
| 3,199 |
C
| 27.318584 | 77 | 0.680213 |
omniverse-code/kit/fabric/include/carb/flatcache/FlatCache.h
|
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Interface.h>
#include <carb/flatcache/PathToAttributesMap.h>
namespace carb
{
namespace flatcache
{
// Callers of createCache() and getCache() can store anything they want in
// UserId. For example, OmniGraph uses it to store the OmniGraph pointer.
struct UserId
{
uint64_t id;
bool operator<(UserId b) const
{
return id < b.id;
}
bool operator==(UserId b) const
{
return id == b.id;
}
bool operator!=(UserId b) const
{
return id != b.id;
}
};
constexpr UserId kDefaultUserId = { 0 };
constexpr UserId kInvalidUserId = { ~uint64_t(0) };
// Flatcache has the option to save a finite number of frames of history,
// organized as a ringbuffer. This is typically used to buffer data between
// simulation rendering. The simplest case, double buffering, allows simulation
// and rendering to run in parallel, each running for the full frame.
// Longer buffers can be used to feed one or more renderers running at
// different rates to simulation.
// To enable this feature, pass CacheType::eWithHistory to createCache(),
// otherwise pass CacheType::eWithoutHistory.
// Multiple caches can be created for each UsdStageId, but at most one can have
// history.
enum class CacheType
{
eWithHistory,
eWithoutHistory,
eWithoutHistoryAndWithCuda,
eWithHistoryAndCuda
};
struct FlatCache
{
CARB_PLUGIN_INTERFACE("carb::flatcache::FlatCache", 0, 4);
// Abstractly, a flatcache maps USD paths to USD attributes, just like a
// UsdStage does.
// Concretely we represent a flatcache by objects of type PathToAttributesMap.
// This method creates a PathToAttributesMap for a given stage, but doesn't
// populate it with values. This allows the cache to be filled lazily as
// values are needed.
// Instead, it traverses the given Usd stage making an index of paths to
// attributes.
// The cache uses the index to organize data into contiguous arrays,
// and also allows you to find prims by type and/or attribute without
// traversing the stage.
// This method also specifies the stage to be used by calls to usdToCache()
// and cacheToUsd().
PathToAttributesMap&(CARB_ABI* createCache)(UsdStageId usdStageId, UserId userId, CacheType cacheType);
void(CARB_ABI* addPrimToCache)(PathToAttributesMap& cache, const pxr::UsdPrim& prim, const std::set<TokenC>& filter);
// Destroy the cache associated with the given stage.
void(CARB_ABI* destroyCache)(UsdStageId usdStageId, UserId userId);
// Prefetch the whole USD stage to the cache
// Typically you only call this at stage load time, because the USD notify
// handler updates the cache if the stage changes.
void(CARB_ABI* usdToCache)(PathToAttributesMap& cache);
// Write back all dirty cached data to the USD stage.
// If your renderer doesn't use the cache then you need to do this
// before rendering.
void(CARB_ABI* cacheToUsd)(PathToAttributesMap& cache);
// Write back only one bucket to usd
void(CARB_ABI* cacheBucketToUsd)(PathToAttributesMap& cache, BucketId bucketId, bool skipMeshPoints);
TypeC(CARB_ABI* usdTypeToTypeC)(pxr::SdfValueTypeName usdType);
PathToAttributesMap*(CARB_ABI* getCache)(UsdStageId usdStageId, UserId userId);
pxr::SdfValueTypeName(CARB_ABI* typeCtoUsdType)(TypeC typeC);
size_t(CARB_ABI* getUsdTypeCount)();
void(CARB_ABI* getAllUsdTypes)(TypeC* outArray, size_t outArraySize);
    /** @brief Import a prim into the cache
*/
void(CARB_ABI* addPrimToCacheNoOverwrite)(PathToAttributesMap& cache,
const pxr::UsdPrim& prim,
const std::set<TokenC>& filter);
void(CARB_ABI* initStaticVariables)();
void(CARB_ABI* exportUsd)(PathToAttributesMap& cache, pxr::UsdStageRefPtr usdStage, const double* timeCode, const double* prevTimeCode);
/** @brief Attempt to serialize the cache into the specified buffer.
*
* @cache[in] The cache to serialize
* @dest[in/out] Pointer to buffer to be written to, will start writing to head
* of pointer. dest will be left pointing to the point after the last write
* @destSize Size of buffer that was allocated for the data (in bytes)
     * @pathStringCache[in/out] Cache of path strings; string lookups are slow, so it is worth reusing across calls
     *
     * @return Number of bytes written; success is determined by (return <= @destSize)
*
*
     * @invariant It is safe to write to any memory within [dest, dest+size] for the
* duration of the function call.
*
* @note If the cache will not fit into the size of memory allocated in
* @dest then it will stop writing, but continue to run the serialize
* algorithm to calculate the actual amount of data that needs to be
* written
*
     * @Todo : make cache const - not possible because serializeMirroredArray is not
* const, however, that is because getArraySpanC is used which also doesn't
* have a const version, so that needs to be addressed first, this is because
* in the call stack we end up with a copy from GPU -> CPU which would need to
* be avoided
*/
uint64_t(CARB_ABI* serializeCache)(PathToAttributesMap& cache, uint8_t* dest, size_t destSize, SerializationCache& pathStringCache);
/** @brief Given a buffer that has the serialized version of a cache written
* using the serialize function, this function will override all the data
* in the cache with the data from the buffer
*
* @cache[in/out] Reference to the cache to be populated
     * @pathCache[in/out] Looking up an SdfPath via string can be expensive, so it
     * is worthwhile to cache this data across many repeated
     * calls.
* @input[in] Pointer to buffer of data containing serialized cache
* @inputSize[in] Size of data in the buffer
* @skipStageConfirmation[in] Whether we should skip making sure the destination stage is open.
*
* @return True if buffer was successfully de-serialized
*
* @note : this currently has to clear the cache before it is populated which is a possibly
* expensive operation
*
     * @TODO: should we care that it came from the same version of the USD file?
*/
bool(CARB_ABI* deserializeCache)(
PathToAttributesMap& destStage,
DeserializationCache& pathCache,
const uint8_t* input,
const size_t inputSize,
bool skipStageConfirmation);
/** @brief Write a cache file to disk at a specified location
*
* @note many parameters to this function are optional
     * @cache[in] The cache to be written to disk
     * @file[in] The location the file is desired to be written to
     * @workingBuffer[in] [Optional] In order to avoid costly reallocations
     * the code will attempt to re-use the memory at the buffer
     * location if it is large enough. If the buffer isn't large
     * enough, the cost of allocation and re-traversal may be paid
     * @workingBufferSize[in] [Optional] If workingBuffer is non-null, then this describes the length
* of the buffer
* @return The amount of data needed to serialize the cache, a return value of 0 indicates an error
*
*/
uint64_t(CARB_ABI* writeCacheToDisk)(PathToAttributesMap& cache,
const char* file,
uint8_t* workingBuffer,
uint64_t workingBufferSize);
/** @brief Read a cache file from the specified location
*
     * @file[in] The location of the cache file to be read
     * @cache[in/out] The cache to be populated
     * @pathCache[in/out] Looking up an SdfPath via string can be expensive, so it
     * is worthwhile to cache this data across many repeated
     * calls.
     * @buffer[in/out] Buffer to use to read the cache file into, passed in to
     * allow reuse rather than allocating per call. Will be resized if not large enough.
* @return Whether the read was successful
*
*/
bool(CARB_ABI* readCacheFromDisk)(PathToAttributesMap& cache,
const char* fileName,
DeserializationCache& pathCache,
std::vector<uint8_t>& buffer);
/** @brief Enable/Disable change notifications on USD changes.
*
* @enable[in] True/False enable notifications
*
*/
void(CARB_ABI* setEnableChangeNotifies)(bool enable);
/** @brief Return whether change notifications on USD changes is enabled.
*
* @return True if change notifications on USD changes is enabled, else False.
*
*/
bool(CARB_ABI* getEnableChangeNotifies)();
/** @brief make buckets for all prims on a USD stage, but only if this
* hasn't been done before.
*
* This is used to lazily create an index of all prims on a stage, without
* the time or memory cost of fetching all the attribute values. The user
* can then use findPrims to, for example, find all the prims of a
* particular type.
*
* If a SimStageWithHistory hasn't been created for this stage then a
* warning will be printed and no population will be done.
*
* @cache[in] The PathToAttributesMap to populate
*/
void(CARB_ABI* minimalPopulateIfNecessary)(PathToAttributesMap& cache);
};
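// Example (illustrative sketch, not part of the API): acquiring the interface
// and creating a double-buffered cache for a stage. `stageId` is assumed to
// identify an open USD stage, and carb::getCachedInterface is assumed to be
// available from the Carbonite headers included above.
inline PathToAttributesMap& exampleCreateCacheWithHistory(UsdStageId stageId)
{
    FlatCache* flatCache = carb::getCachedInterface<FlatCache>();
    return flatCache->createCache(stageId, kDefaultUserId, CacheType::eWithHistory);
}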
}
}
| 9,956 |
C
| 40.144628 | 140 | 0.677581 |
omniverse-code/kit/fabric/include/carb/flatcache/Allocator.h
|
#pragma once
#include <cmath>
#include <carb/logging/Log.h>
#include <carb/Defines.h>
#include <carb/flatcache/Defines.h>
#include <carb/flatcache/Intrinsics.h>
#define ALLOCATOR_HEADER USE_IF( USING( DEVELOPMENT_BUILD ) )
#define ALLOCATOR_STATS USE_IF( USING( ALLOCATOR_HEADER ) ) // requires Header's byte tracking per-allocation
#define ALLOCATOR_LEAK_CHECK USE_IF( USING( ALLOCATOR_HEADER ) ) // requires Header's byte tracking per-allocation
namespace carb
{
namespace flatcache
{
inline const char* humanReadableSize( const uint64_t bytes ) noexcept
{
auto va = [](auto ...params) -> const char* {
static char tmp[1024];
#ifdef _WIN32
_snprintf_s(tmp, sizeof(tmp), params...);
#else
snprintf(tmp, sizeof(tmp), params...);
#endif
return (const char*)&tmp;
};
constexpr const char SIZE_UNITS[64][3]{
" B", " B", " B", " B", " B", " B", " B", " B", " B", " B",
"KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB",
"MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB",
"GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB",
"TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB",
"PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB",
"EB", "EB", "EB", "EB"
};
static constexpr size_t B = 1ull;
static constexpr size_t KB = 1024ull;
static constexpr size_t MB = (1024ull*1024ull);
static constexpr size_t GB = (1024ull*1024ull*1024ull);
static constexpr size_t TB = (1024ull*1024ull*1024ull*1024ull);
static constexpr size_t PB = (1024ull*1024ull*1024ull*1024ull*1024ull);
static constexpr size_t EB = (1024ull*1024ull*1024ull*1024ull*1024ull*1024ull);
constexpr const size_t SIZE_BASE[64]{
B, B, B, B, B, B, B, B, B, B,
KB, KB, KB, KB, KB, KB, KB, KB, KB, KB,
MB, MB, MB, MB, MB, MB, MB, MB, MB, MB,
GB, GB, GB, GB, GB, GB, GB, GB, GB, GB,
TB, TB, TB, TB, TB, TB, TB, TB, TB, TB,
PB, PB, PB, PB, PB, PB, PB, PB, PB, PB,
EB, EB, EB, EB
};
const uint32_t power = bytes ? ( 64u - clz64( bytes ) ) - 1u : 0;
const char *const units = SIZE_UNITS[power];
const size_t base = SIZE_BASE[power];
const size_t count = bytes / base;
return va("%zu %s", count, units);
}
// A wrapper around malloc/free that aims to:
//
// * Cheaply track allocation counts and bytes, and detect leaks automatically at ~Allocator()
//
// * Cheaply track usage in terms of peak memory usage, and total lifetime usage broken down by size. Sample output:
// dumped to console appears like so:
//
// == Allocator 0x000000E67BEFCEA0 Stats ==
// allocCount: 0
// allocBytes: 0 B
// peakAllocCount: 4002
// peakAllocBytes: 4 GB
// minAllocBytes: 312 B
// maxAllocBytes: 6 MB
//
// Lifetime Allocation Histogram:
// Normalized over TOTAL allocations: 13956
// < 512 B|***** 29% 4002
// < 1 KB| 0% 0
// < 2 KB| 0% 0
// < 4 KB| 0% 0
// < 8 KB|*** 14% 2000
// < 16 KB| 0% 0
// < 32 KB|*** 14% 1994
// < 64 KB| 0% 0
// < 128 KB|*** 14% 1976
// < 256 KB| 0% 0
// < 512 KB|*** 14% 1904
// < 1 MB| 0% 0
// < 2 MB|** 12% 1616
// < 4 MB| 0% 0
// < 8 MB|* 3% 464
// ========================
struct Allocator
{
Allocator();
~Allocator();
void* alloc(const size_t bytes);
void free(void *const ptr);
template<typename T, typename ...Params>
T* new_(Params&& ...params);
template<typename T>
void delete_(T*const t);
void resetStats() noexcept;
void reportUsage() noexcept;
bool checkLeaks() noexcept;
private:
#if USING( ALLOCATOR_HEADER )
struct BlockHeader
{
size_t bytes;
};
#endif // #if USING( ALLOCATOR_HEADER )
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
size_t allocCount;
size_t allocBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
#if USING( ALLOCATOR_STATS )
size_t peakAllocCount;
size_t peakAllocBytes;
size_t minAllocBytes;
size_t maxAllocBytes;
static constexpr size_t ALLOC_BUCKET_COUNT = 65;
size_t lifetimeAllocCount;
size_t lifetimeAllocBuckets[ALLOC_BUCKET_COUNT];
#endif // #if USING( ALLOCATOR_STATS )
};
struct AllocFunctor
{
Allocator *allocator;
void* operator()(const size_t bytes)
{
CARB_ASSERT(allocator);
return allocator->alloc(bytes);
}
};
struct FreeFunctor
{
Allocator *allocator;
void operator()(void *const ptr)
{
CARB_ASSERT(allocator);
return allocator->free(ptr);
}
};
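// Illustrative sketch (an assumption, not part of the original header):
// pairing an Allocator with the functors above, e.g. for code that takes
// allocation hooks by value:
//
//   Allocator allocator;
//   AllocFunctor allocFn{ &allocator };
//   FreeFunctor freeFn{ &allocator };
//   void* p = allocFn(256);
//   freeFn(p);
//   // ~Allocator() reports usage and asserts if any allocation is outstanding.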
inline Allocator::Allocator()
{
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
allocCount = 0;
allocBytes = 0;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
resetStats();
}
inline Allocator::~Allocator()
{
checkLeaks();
reportUsage();
}
inline void* Allocator::alloc(const size_t bytes)
{
#if USING( ALLOCATOR_HEADER )
const size_t totalBytes = bytes + sizeof(BlockHeader);
#elif USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
// Without block headers there is no per-allocation overhead to account for
const size_t totalBytes = bytes;
#endif // #if USING( ALLOCATOR_HEADER )
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
CARB_ASSERT((allocCount + 1) > allocCount);
CARB_ASSERT((allocBytes + totalBytes) > allocBytes);
++allocCount;
allocBytes += totalBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
#if USING( ALLOCATOR_STATS )
if ( allocBytes > peakAllocBytes )
{
peakAllocBytes = allocBytes;
peakAllocCount = allocCount;
}
if ( totalBytes < minAllocBytes )
{
minAllocBytes = totalBytes;
}
if ( totalBytes > maxAllocBytes )
{
maxAllocBytes = totalBytes;
}
const uint32_t bucket = ( 64u - clz64( totalBytes - 1ull ) );
CARB_ASSERT(lifetimeAllocBuckets[bucket] + 1 > lifetimeAllocBuckets[bucket]);
++lifetimeAllocBuckets[bucket];
++lifetimeAllocCount;
#endif // #if USING( ALLOCATOR_STATS )
#if USING( ALLOCATOR_HEADER )
BlockHeader *const header = (BlockHeader*)malloc(totalBytes);
CARB_ASSERT(header);
header->bytes = totalBytes;
return header+1;
#else // #if USING( ALLOCATOR_HEADER )
return malloc(bytes);
#endif // #if USING( ALLOCATOR_HEADER )
}
inline void Allocator::free(void *const ptr)
{
#if USING( ALLOCATOR_HEADER )
CARB_ASSERT(ptr);
BlockHeader *header = (BlockHeader*)ptr;
--header;
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
const size_t totalBytes = header->bytes;
CARB_ASSERT((allocCount - 1) < allocCount);
CARB_ASSERT((allocBytes - totalBytes) < allocBytes);
--allocCount;
allocBytes -= totalBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
::free(header);
#else // #if USING( ALLOCATOR_HEADER )
::free(ptr);
#endif // #if USING( ALLOCATOR_HEADER )
}
template<typename T, typename ...Params>
inline T* Allocator::new_(Params&& ...params)
{
T *const t = (T*)Allocator::alloc(sizeof(T));
new (t) T(std::forward<Params>(params)...);
return t;
}
template<typename T>
inline void Allocator::delete_(T*const t)
{
CARB_ASSERT(t);
t->~T();
#if USING( ALLOCATOR_HEADER )
BlockHeader *header = (BlockHeader*)t;
header--;
CARB_ASSERT(header->bytes == (sizeof(BlockHeader) + sizeof(T)));
#endif // #if USING( ALLOCATOR_HEADER )
Allocator::free(t);
}
inline void Allocator::resetStats() noexcept
{
#if USING( ALLOCATOR_STATS )
peakAllocCount = 0;
peakAllocBytes = 0;
minAllocBytes = SIZE_MAX;
maxAllocBytes = 0;
lifetimeAllocCount = 0;
for ( size_t i = 0; i < ALLOC_BUCKET_COUNT; ++i )
{
lifetimeAllocBuckets[i] = 0;
}
#endif // #if USING( ALLOCATOR_STATS )
}
inline void Allocator::reportUsage() noexcept
{
#if USING( ALLOCATOR_STATS )
CARB_LOG_INFO("== Allocator 0x%p Stats ==", this);
if (!lifetimeAllocCount)
{
CARB_LOG_INFO("<no stats to report; unused allocator>");
CARB_LOG_INFO("========================");
return;
}
CARB_LOG_INFO("allocCount: %12zu", allocCount);
CARB_LOG_INFO("allocBytes: %15s", humanReadableSize(allocBytes));
CARB_LOG_INFO("peakAllocCount: %12zu", peakAllocCount);
CARB_LOG_INFO("peakAllocBytes: %15s", humanReadableSize(peakAllocBytes));
CARB_LOG_INFO("minAllocBytes: %15s", humanReadableSize(minAllocBytes));
CARB_LOG_INFO("maxAllocBytes: %15s", humanReadableSize(maxAllocBytes));
CARB_LOG_INFO("");
CARB_LOG_INFO("Lifetime Allocation Histogram:");
size_t begin = 0;
for ( ; begin < ALLOC_BUCKET_COUNT; ++begin )
{
if ( lifetimeAllocBuckets[begin] )
{
break;
}
}
size_t end = 0;
for ( ; end < ALLOC_BUCKET_COUNT; ++end )
{
if ( lifetimeAllocBuckets[ALLOC_BUCKET_COUNT - end - 1] )
{
end = ALLOC_BUCKET_COUNT - end;
break;
}
}
CARB_LOG_INFO(" Normalized over TOTAL allocations: %zu", lifetimeAllocCount);
size_t i;
float normalized[ALLOC_BUCKET_COUNT];
for ( i = begin; i < end; ++i )
{
normalized[i] = (float)lifetimeAllocBuckets[i] / (float)lifetimeAllocCount;
}
constexpr size_t WIDTH = 16;
for ( i = begin; i < end; ++i )
{
char buf[WIDTH+1] = {};
const size_t w = ( size_t )std::ceil(WIDTH * normalized[i]);
for( size_t j = 0; j < w; ++j)
{
buf[j] = '*';
}
static_assert(WIDTH == 16, "Fix CARB_LOG_INFO below");
CARB_LOG_INFO(" <%7s|%-16s %3.0f%% %12zu", humanReadableSize(1ull<<i), buf, (normalized[i] * 100.f), lifetimeAllocBuckets[i]);
}
CARB_LOG_INFO("========================");
#endif // #if USING( ALLOCATOR_STATS )
}
inline bool Allocator::checkLeaks() noexcept
{
#if USING( ALLOCATOR_LEAK_CHECK )
if (allocCount || allocBytes)
{
CARB_LOG_ERROR("PathToAttributesMap detected a memory leak of %s!\n", humanReadableSize(allocBytes));
CARB_ASSERT(false, "PathToAttributesMap detected a memory leak of %s!\n", humanReadableSize(allocBytes));
return true;
}
#endif // #if USING( ALLOCATOR_LEAK_CHECK )
return false;
}
} // namespace flatcache
} // namespace carb
| 10,798 |
C
| 28.425068 | 135 | 0.571217 |
omniverse-code/kit/fabric/include/carb/flatcache/InterpolationUsd.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// This include must come first
// clang-format off
#include "UsdPCH.h"
// clang-format on
#include <pxr/base/gf/matrix4d.h>
#include <pxr/base/gf/quatf.h>
#include "carb/logging/Log.h"
/**
* @brief Defined in a separate location to the other lerp functions
* in order to avoid breaking C-ABI compatibility
*/
namespace carb
{
namespace flatcache
{
/**
* @brief Spherical interpolation specialization relying on pxr native
* interpolation for quaternions
*/
template <>
inline const pxr::GfQuatf interpolate(const pxr::GfQuatf& q0, const pxr::GfQuatf& q1, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("InterpolationUsd interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
pxr::GfQuatf result = pxr::GfSlerp(theta, q0, q1);
return result;
}
/**
* @brief pxr::GfMatrix4d interpolation specialization. Used in Kit by OmniHydraDelegate
*/
template <>
inline const pxr::GfMatrix4d interpolate(const pxr::GfMatrix4d& m0, const pxr::GfMatrix4d& m1, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("InterpolationUsd interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
pxr::GfMatrix4d r0, r1; // rotations, where -r is inverse of r
pxr::GfVec3d s0, s1; // scale
pxr::GfMatrix4d u0, u1; // rotations, may contain shear info
pxr::GfVec3d t0, t1; // translations
pxr::GfMatrix4d p0, p1; // p is never modified; can contain projection info
// Account for rotation, translation, scale
// (order is mat = r * s * -r * u * t), eps=1e-10 used to avoid zero values
m0.Factor(&r0, &s0, &u0, &t0, &p0);
m1.Factor(&r1, &s1, &u1, &t1, &p1);
// Interpolate component-wise
pxr::GfVec3d tResult = pxr::GfLerp(theta, t0, t1);
pxr::GfVec3d sResult = pxr::GfLerp(theta, s0, s1);
pxr::GfQuatd rResult = pxr::GfSlerp(u0.ExtractRotationQuat(), u1.ExtractRotationQuat(), theta);
pxr::GfMatrix4d result = pxr::GfMatrix4d(pxr::GfRotation(rResult), pxr::GfCompMult(sResult, tResult));
return result;
}
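// Illustrative usage sketch (not part of the original header; assumes the
// primary interpolate() template, declared alongside the other lerp
// functions, is visible):
//
//   pxr::GfMatrix4d a(1.0); // identity
//   pxr::GfMatrix4d b = pxr::GfMatrix4d(1.0).SetTranslate(pxr::GfVec3d(2, 0, 0));
//   pxr::GfMatrix4d mid = interpolate(a, b, 0.5f); // translation (1, 0, 0)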
}
}
| 2,530 |
C
| 29.865853 | 107 | 0.686166 |
omniverse-code/kit/fabric/include/carb/flatcache/RationalTime.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <algorithm>
#include <map>
#include <stdint.h>
namespace carb
{
namespace flatcache
{
// Each frame in the history buffer is timestamped with a frame time, stored as
// a rational number to minimize rounding issues. See threadgate::TimeRatio.
struct RationalTime
{
int64_t numerator;
uint64_t denominator;
// Minimize the denominator by dividing both parts by gcd(numerator, denominator)
RationalTime reduce() const
{
RationalTime result{0, 0};
int64_t gcdNumDen = gcd(numerator, denominator);
if (gcdNumDen != 0)
{
result.numerator = numerator / gcdNumDen;
result.denominator = denominator / gcdNumDen;
}
return result;
}
bool operator==(RationalTime rhs) const
{
RationalTime thisReduced = reduce();
RationalTime rhsReduced = rhs.reduce();
return (thisReduced.numerator == rhsReduced.numerator) && (thisReduced.denominator == rhsReduced.denominator);
}
bool operator!=(RationalTime rhs) const
{
return !(*this == rhs);
}
static int64_t gcd(int64_t a, int64_t b)
{
while (b != 0)
{
int64_t t = b;
b = a % b;
a = t;
}
return std::max(a, -a);
}
RationalTime operator-(RationalTime b) const
{
RationalTime result;
result.numerator = numerator * int64_t(b.denominator) - b.numerator * int64_t(denominator);
result.denominator = denominator * b.denominator;
return result.reduce();
}
RationalTime operator*(int64_t b) const
{
RationalTime result;
result.numerator = numerator * b;
result.denominator = denominator;
return result.reduce();
}
};
static const RationalTime kInvalidTime = { 0, 0 };
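// Worked example (illustrative): RationalTime{ 6, 8 }.reduce() yields { 3, 4 },
// and RationalTime{ 1, 3 } - RationalTime{ 1, 6 } evaluates to
// { 1*6 - 1*3, 3*6 } = { 3, 18 }, which reduce() returns as { 1, 6 }.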
} // namespace flatcache
} // namespace carb
| 2,308 |
C
| 24.94382 | 118 | 0.642548 |
omniverse-code/kit/fabric/include/carb/flatcache/ApiLogger.h
|
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/IToken.h>
#include <iostream>
// To log all FlatCache methods that access a particular path and attribute,
// set the following three defines
#define ENABLE_FLATCACHE_API_LOG 0
#if ENABLE_FLATCACHE_API_LOG
#define attrToTrace "attrToLog"
#define pathToTrace "/primToLog"
namespace carb
{
namespace flatcache
{
struct ApiLogger
{
bool& enabled;
const char* desc;
ApiLogger(const char* desc, bool& enabled, const TokenC& attrNameC) : desc(desc), enabled(enabled)
{
Token attrName(attrNameC);
if (attrName == Token(attrToTrace))
{
std::cout << "begin " << desc << "\n";
enabled = true;
}
}
ApiLogger(const char* desc, bool& enabled, const PathC& pathC, const TokenC& attrNameC) : desc(desc), enabled(enabled)
{
Path path(pathC);
Token attrName(attrNameC);
if (path == Path(pathToTrace) && attrName == Token(attrToTrace))
{
std::cout << "begin " << desc << "\n";
enabled = true;
}
}
~ApiLogger()
{
if (enabled)
{
std::cout << "end " << desc << "\n";
}
enabled = false;
}
};
#define APILOGGER(...) ApiLogger logger(__VA_ARGS__)
}
}
#else
#define APILOGGER(...)
#endif
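// Illustrative usage sketch (the method and variable names are assumptions):
// placed at the top of a FlatCache entry point, APILOGGER prints matching
// "begin"/"end" lines whenever the traced path/attribute is touched, and
// compiles away entirely when ENABLE_FLATCACHE_API_LOG is 0.
//
//   void setAttribute(const carb::flatcache::PathC& path,
//                     const carb::flatcache::TokenC& attrName)
//   {
//       static bool loggingEnabled = false;
//       APILOGGER("setAttribute", loggingEnabled, path, attrName);
//       // ... body of the method ...
//   }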
| 1,789 |
C
| 23.861111 | 122 | 0.636669 |
omniverse-code/kit/fabric/include/carb/flatcache/underlying.h
|
#pragma once
#include <type_traits>
namespace carb {
namespace flatcache {
template <typename EnumT>
constexpr inline typename std::underlying_type<EnumT>::type underlying(const EnumT& t)
{
return static_cast<typename std::underlying_type<EnumT>::type>(t);
}
} // namespace flatcache
} // namespace carb
| 312 |
C
| 18.562499 | 86 | 0.740385 |
omniverse-code/kit/fabric/include/carb/flatcache/Ordered_Set.h
|
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <vector>
namespace carb
{
namespace flatcache
{
template <class T, class Compare = std::less<T>>
struct set
{
using value_type = T;
std::vector<T> v;
Compare cmp;
using iterator = typename std::vector<T>::iterator;
using const_iterator = typename std::vector<T>::const_iterator;
iterator begin()
{
return v.begin();
}
iterator end()
{
return v.end();
}
const_iterator begin() const
{
return v.begin();
}
const_iterator end() const
{
return v.end();
}
set(const Compare& c = Compare()) : v(), cmp(c)
{
}
template <class InputIterator>
set(InputIterator first, InputIterator last, const Compare& c = Compare()) : v(first, last), cmp(c)
{
std::sort(begin(), end(), cmp);
}
set(std::initializer_list<T> _Ilist) : set(_Ilist.begin(), _Ilist.end())
{
}
void reserve(size_t newCapacity)
{
v.reserve(newCapacity);
}
void clear()
{
v.clear();
}
iterator insert(const T& t)
{
iterator i = std::lower_bound(begin(), end(), t, cmp);
if (i == end() || cmp(t, *i))
i = v.insert(i, t);
return i;
}
iterator insert(T&& t)
{
iterator i = std::lower_bound(begin(), end(), t, cmp);
if (i == end() || cmp(t, *i))
i = v.insert(i, std::move(t));
return i;
}
template <class _Iter>
void insert(_Iter _First, _Iter _Last)
{ // insert [_First, _Last) one at a time
for (; _First != _Last; ++_First)
{
insert(*_First);
}
}
iterator insert(const_iterator hint, const value_type& value)
{
// Measurements show it is faster to ignore hint in this application
return insert(value);
}
void insert(std::initializer_list<T> _Ilist)
{
insert(_Ilist.begin(), _Ilist.end());
}
size_t erase(const T& key)
{
iterator removeElement = find(key);
if (removeElement != v.end())
{
v.erase(removeElement);
return 1;
}
else
{
return 0;
}
}
iterator erase(iterator iter)
{
return v.erase(iter);
}
const_iterator find(const T& t) const
{
const_iterator i = std::lower_bound(begin(), end(), t, cmp);
return i == end() || cmp(t, *i) ? end() : i;
}
iterator find(const T& t)
{
iterator i = std::lower_bound(begin(), end(), t, cmp);
return i == end() || cmp(t, *i) ? end() : i;
}
bool contains(const T& t) const
{
const_iterator i = std::lower_bound(begin(), end(), t, cmp);
return i != end() && !cmp(t, *i);
}
bool operator==(const set<T>& other) const
{
return v == other.v;
}
bool operator!=(const set<T>& other) const
{
return v != other.v;
}
size_t size() const
{
return v.size();
}
T* data()
{
return v.data();
}
const T* data() const
{
return v.data();
}
};
template <class T, class Compare = std::less<T>>
bool operator<(const set<T, Compare>& left, const set<T, Compare>& right)
{
return left.v < right.v;
}
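// Illustrative usage sketch (not part of the original header): the set keeps
// its elements sorted in a contiguous vector, so insertion order is
// irrelevant and lookups are binary searches over contiguous memory.
//
//   carb::flatcache::set<int> s{ 3, 1, 2 };
//   s.insert(2); // duplicate; ignored
//   bool has2 = s.contains(2); // true
//   // s.data()[0..2] == {1, 2, 3}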
template<typename T>
flatcache::set<T> nWayUnion(std::vector<flatcache::set<T>>& srcBuckets)
{
flatcache::set<T> retval;
// Calculate the maximum number of destination attributes
// We could instead calculate it exactly by finding the union of attribute names
size_t maxDestAttrCount = 0;
for (flatcache::set<T>& srcBucket : srcBuckets)
{
maxDestAttrCount += srcBucket.size();
}
retval.reserve(maxDestAttrCount);
auto currentDest = std::back_inserter(retval.v);
size_t bucketCount = srcBuckets.size();
// Initialize invariant that nonEmpty is the vector of buckets that have
// non-zero attribute counts
struct NonEmptySegment
{
// Invariant is current!=end
typename std::vector<T>::iterator current;
typename std::vector<T>::iterator end;
};
std::vector<NonEmptySegment> nonEmpty;
nonEmpty.reserve(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
if (srcBuckets[i].begin() != srcBuckets[i].end())
{
nonEmpty.push_back({ srcBuckets[i].begin(), srcBuckets[i].end() });
}
}
// Keep going until there's only 1 non-empty bucket
// At that point we can just copy its attributes to the output
while (1 < nonEmpty.size())
{
// Find all the buckets that have the minimum element
// These are the ones whose iterators will get advanced
// By the loop guard and the invariant, we know that nonEmpty[0] exists
// and that nonEmpty[0].current!=nonEmpty[0].end.
// So *nonEmpty[0].current is a safe dereference
T minSoFar = *nonEmpty[0].current;
std::vector<size_t> indicesAtMin;
indicesAtMin.reserve(nonEmpty.size());
indicesAtMin.push_back(0);
for (size_t i = 1; i != nonEmpty.size(); i++)
{
if (*nonEmpty[i].current < minSoFar)
{
minSoFar = *nonEmpty[i].current;
indicesAtMin = { i };
}
else if (*nonEmpty[i].current == minSoFar)
{
indicesAtMin.push_back(i);
}
}
// Copy minimum element to the output
*currentDest = minSoFar;
++currentDest;
// Advance the iterators that pointed to the min
for (size_t i = 0; i != indicesAtMin.size(); i++)
{
nonEmpty[indicesAtMin[i]].current++;
}
// Maintain the invariant that nonEmpty are the non empty ones
// Replace with O(n) copy into a temporary if necessary
auto it = nonEmpty.begin();
while (it != nonEmpty.end())
{
if (it->current == it->end)
{
it = nonEmpty.erase(it);
}
else
{
++it;
}
}
}
// By the negation of the guard we know that nonEmpty has zero or one elements
if (nonEmpty.size() == 1)
{
// If one bucket is left, copy its elements to the output
std::copy(nonEmpty[0].current, nonEmpty[0].end, currentDest);
}
return retval;
}
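// Illustrative usage sketch (the values are assumptions): merging sorted
// buckets produces the deduplicated union in a single pass.
//
//   std::vector<carb::flatcache::set<int>> buckets{ { 1, 3 }, { 2, 3 }, { 3, 4 } };
//   carb::flatcache::set<int> merged = nWayUnion(buckets); // {1, 2, 3, 4}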
}
}
| 7,055 |
C
| 24.381295 | 103 | 0.556768 |
omniverse-code/kit/fabric/include/carb/flatcache/StageWithHistory.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "IFlatcache.h"
#include <carb/Framework.h>
#include <carb/Interface.h>
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/PrimChanges.h>
#include <carb/logging/Log.h>
#include <gsl/span>
#include <carb/flatcache/Type.h>
#include <map>
#include <stdint.h>
#include "carb/cpp17/Optional.h"
using carb::cpp17::optional;
namespace carb
{
namespace flatcache
{
// The comments in this file are intended to be read top to bottom, like a book.
// This file defines flatcache::StageWithHistory, an instance of which stores
// the current stage and a configurable number of frames of state history. It
// is thread safe, in the sense that the current state can be safely read and
// written in parallel with multiple threads reading the history. The class
// definition is towards the end of this file. We first define some basic types,
// a read/write accessor class for use by the main/game/sim thread, and two
// read-only accessor classes for use on render threads. These classes provide
// access to an interpolated state and a window of state history respectively.
// To specify paths, attribute names, and attribute types we use
// flatcache::Path, flatcache::Token and graph::Type types, rather than
// USD's SdfPath, TfToken and TfType. This allows us to access the stage and
// history without including USD headers.
// The main class is this file is StageWithHistory, which is defined towards
// the end of the file.
class StageWithHistory;
template<typename T>
class AttributeArrayResult;
/**
* @invariant arrayBytes.size() must be a multiple of bytesPerElement
*/
class ConstArrayAsBytes
{
public:
gsl::span<const gsl::byte> arrayBytes;
size_t bytesPerElement;
Type elementType;
};
// findPrims() returns a list of buckets of prims, represented by PrimBucketList.
class PrimBucketList
{
friend class StageAtTimeInterval;
friend class StageInProgress;
protected:
PrimBucketListId m_primBucketListId;
static carb::flatcache::IPrimBucketList* sIPrimBucketList();
PrimBucketList(PrimBucketListId id) : m_primBucketListId(id)
{
}
public:
// PrimBucketList is opaque; you have to use the getAttributeArray methods
// of StageInProgress, StageAtTime or StageAtTimeInterval to read the
// attributes of its elements.
size_t bucketCount() const;
size_t size() const;
void print() const;
PrimBucketListId getId() const
{
return m_primBucketListId;
}
~PrimBucketList();
};
// ChangedPrimBucketList is a PrimBucketList that has changes stored for a
// particular listener. It is returned by StageInProgress::getChanges().
class ChangedPrimBucketList : public PrimBucketList
{
ChangedPrimBucketList(PrimBucketListId id) : PrimBucketList(id) {}
friend class StageInProgress;
public:
BucketChanges getChanges(size_t index);
AddedPrimIndices getAddedPrims(size_t index);
};
// The main/game/sim thread uses the following class to read and write the
// state at the current frame.
//
// StageInProgress can either be used RAII style, where you construct it from a frameNumber,
// or non-RAII style, where you construct it from an existing stageInProgressId.
class StageInProgress
{
StageInProgressId m_stageInProgress;
bool m_createdFromId;
UsdStageId m_usdStageId; // Only valid if m_createdFromId == false
public:
// The constructor creates a new frame and locks it for read/write
StageInProgress(StageWithHistory& stageWithHistory, size_t simFrameNumber);
// Create from an already locked frame
StageInProgress(StageInProgressId stageInProgressId);
// Returns the frame number allocated by constructor
size_t getFrameNumber();
// Returns the frame time allocated by constructor
RationalTime getFrameTime();
// Returns which mirrored array is valid: CPU, GPU, etc.
ValidMirrors getAttributeValidBits(const Path& path, const Token& attrName) const;
// getAttribute returns a read/write pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttribute(const Path& path, const Token& attrName);
// getAttribute returns a read-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
const T* getAttributeRd(const Path& path, const Token& attrName);
// getAttribute returns a write-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttributeWr(const Path& path, const Token& attrName);
// getAttribute returns a read/write pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttributeGpu(const Path& path, const Token& attrName);
// getAttribute returns a read-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
const T* getAttributeRdGpu(const Path& path, const Token& attrName);
// getAttribute returns a write-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttributeWrGpu(const Path& path, const Token& attrName);
// getOrCreateAttributeWr returns a write-only pointer to a non-array
// attribute. If the attribute doesn't exist, then it will create it.
// The return type is a reference rather than a pointer because the
// attribute is guaranteed to exist on exit
template <typename T>
T& getOrCreateAttributeWr(const Path& path, const Token& attrName, Type type);
// getAttribute returns a read/write span of an array attribute
// The span allows the array size to be read, but not written
// To set the array size, use setArrayAttributeSize
template <typename T>
gsl::span<T> getArrayAttribute(const Path& path, const Token& attrName);
// getAttributeRd returns a read-only span of an array attribute
// The array size is also read only
template <typename T>
gsl::span<const T> getArrayAttributeRd(const Path& path, const Token& attrName);
// getAttributeRd returns a write-only span of an array attribute
// The array size is read only, to resize use setArrayAttributeSize
template <typename T>
gsl::span<T> getArrayAttributeWr(const Path& path, const Token& attrName);
// Get the size of an array attribute. When writing CPU code, it isn't
// normally necessary to use this method, as getArrayAttribute returns a
// span containing the data pointer and the size.
// However, when writing mixed CPU/GPU code it is wasteful to copy the
// array data from GPU to CPU when just the size is required, so use this
// method in that case.
size_t getArrayAttributeSize(const Path& path, const Token& attrName);
// Set the size of an array attribute
void setArrayAttributeSize(const Path& path, const Token& attrName, size_t elemCount);
template <typename T>
gsl::span<T> setArrayAttributeSizeAndGet(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
size_t indexInBucket,
const Token& attrName,
size_t newElemCount);
// createPrim, destroyPrim, createAttribute and destroyAttribute do what
// you'd expect
void createPrim(const Path& path);
void destroyPrim(const Path& path);
void createAttribute(const Path& path, const Token& attrName, Type type);
template<int n>
void createAttributes(const Path& path, std::array<AttrNameAndType, n> attributes);
// Deprecated: type argument is not used.
void destroyAttribute(const Path& path, const Token& attrName, Type type);
void destroyAttribute(const Path& path, const Token& attrName);
template <int n>
void destroyAttributes(const Path& path, const std::array<Token, n>& attributes);
void destroyAttributes(const Path& path, const std::vector<Token>& attributes);
// findPrims() finds prims that have all the attributes in "all", and any
// of the attributes in "any", and none of the attributes in "none".
// The attributes of the resulting prims can be accessed as piecewise
// contiguous arrays, using getAttributeArray() below, which is typically
// faster than calling getAttribute for each prim.
PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any = {},
const carb::flatcache::set<AttrNameAndType>& none = {});
/**
* Tell a listener to log changes for an attribute.
* Attaches listener to stage if not already attached
*
* @param[in] attrName The attribute's name
* @param[in] listenerId The listener
*/
void attributeEnableChangeTracking(const Token& attrName, ListenerId listenerId);
/**
* Tell a listener to stop logging changes for an attribute.
* Attaches listener to stage if not already attached
*
* @param[in] attrName The attribute's name
* @param[in] listenerId The listener
*/
void attributeDisableChangeTracking(const Token& attrName, ListenerId listenerId);
/**
* Tell a listener to log prim creates
* Attaches listener to stage if not already attached
*
* @param[in] listenerId The listener
*/
void enablePrimCreateTracking(ListenerId listenerId);
/**
* Pause change tracking.
*
* @param[in] listenerId The listener to pause
*/
void pauseChangeTracking(ListenerId listenerId);
/**
* Resume change tracking.
*
* @param[in] listenerId The listener to resume
*/
void resumeChangeTracking(ListenerId listenerId);
/**
* Is change tracking paused?
*
* @param[in] listenerId The listener
* @return Whether the listener is paused
*/
bool isChangeTrackingPaused(ListenerId listenerId);
/**
* Get changes
*
* @param[in] listenerId The listener
* @return The changes that occurred since the last time the listener was popped
*/
ChangedPrimBucketList getChanges(ListenerId listenerId);
/**
* Clear the list of changes
*
* @param[in] listenerId The listener
*/
void popChanges(ListenerId listenerId);
/**
* Get the number of listeners
*
* @return The number of listeners listening to this stage
*/
size_t getListenerCount();
/**
* Is the listener attached to this stage
*
* @return Whether the listener is attached to this stage
*/
bool isListenerAttached(ListenerId listenerId);
/**
* Detach the listener from the stage. Future changes will not be logged for this listener.
*
* @param[in] listenerId The listener
*/
void detachListener(ListenerId listenerId);
// getAttributeArray(primBucketList, index, attrName) returns a read/write
// contiguous array of the values of attribute "attrName" for each prim of
// bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
template <typename T>
gsl::span<T> getAttributeArray(const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName);
template <typename T>
gsl::span<const T> getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
gsl::span<T> getAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName);
template <typename T>
gsl::span<T> getAttributeArrayGpu(const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName);
template <typename T>
gsl::span<const T> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
gsl::span<T> getAttributeArrayWrGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName);
template <typename T>
gsl::span<T> getOrCreateAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName,
Type type);
// getAttributeArray(primBucketList, index, attrName) returns a vector of
// array-valued attributes "attrName" for the prims of bucket "index" of
// "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
// It gives read/write access to the values of each prim's array
template <typename T>
std::vector<gsl::span<T>> getArrayAttributeArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getAttributeArray(primBucketList, index, attrName) returns a vector of
// array-valued attributes "attrName" for the prims of bucket "index" of
// "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
// It gives read-only access to the values of each prim's array
template <typename T>
std::vector<gsl::span<const T>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getAttributeArray(primBucketList, index, attrName) returns a vector of
// array-valued attributes "attrName" for the prims of bucket "index" of
// "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
// It gives write-only access to the values of each prim's array
template <typename T>
std::vector<gsl::span<T>> getArrayAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getPathArray(primBucketList, index) returns a read-only contiguous array
// of the paths of the prims of bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
gsl::span<const Path> getPathArray(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
flatcache::set<AttrNameAndType> getAttributeNamesAndTypes(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const;
// TODO: replace with an iterator for iterating over bucket names
void printBucketNames() const;
// Record that the attribute at path.attrName has been modified. Right now this is
// done explicitly to give a high degree of control over which attributes get
// passed to the notice.
void logAttributeWriteForNotice(const Path& path, const Token& attrName);
// Construct and send a TfNotice with a vector of objects paths
// that have changed, much like the ObjectsChanged notice from USD
void broadcastTfNoticeForAttributesChanged() const;
// Connection API
/**
* @brief Create a connection on the target prim
*
* @param path the target prim on which to create a connection
* @param connectionName specifies the connections attribute name on the prim
* @param connection specifies the target prim and attribute of the connection
*/
void createConnection(const Path& path, const Token& connectionName, const Connection& connection);
/**
* @brief Create an arbitrary number of connections on the target prim
*
* @param path the target prim on which to create a connection
* @param connectionNames a span of attribute names. Must match the size of the connections vector
* @param connections a span of connections. Must match the size of the connectionNames vector
*/
void createConnections(const Path& path, const gsl::span<Token>& connectionNames, const gsl::span<Connection>& connections );
/**
* @brief removes a connection from a prim
*
* @param path the target prim from which to remove a connection
* @param connectionName the name of the connection to remove
*/
void destroyConnection(const Path& path, const Token& connectionName);
/**
* @brief removes an arbitrary number of connections from a prim
*
* @param path the target prim from which to remove the connections
* @param connectionNames the names of the connections to be removed
*/
void destroyConnections(const Path& path, const gsl::span<Token>& connectionNames);
/**
* @brief Get a R/W pointer to a connection on the target prim
*
* @param path the target prim
* @param connectionName the target connection name
* @return a R/W pointer to the connection
*/
Connection* getConnection(const Path& path, const Token& connectionName);
/**
* @brief Get a read only pointer to a connection on the target prim
*
* @param path the target prim
* @param connectionName the target connection name
* @return a read only pointer to the connection
*/
const Connection* getConnectionRd(const Path& path, const Token& connectionName);
/**
* @brief Get a write only pointer to a connection on the target prim
*
* @param path the target prim
* @param connectionName the target connection name
* @return a write only pointer to the connection
*/
Connection* getConnectionWr(const Path& path, const Token& connectionName);
/**
* @brief Copy all attributes from the source prim to the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
*
* @param[in] srcPath the source prim
* @param[in] dstPath the destination prim
*/
void copyAttributes(const Path& srcPath, const Path& dstPath);
/**
* @brief Copy the specified attributes from the source prim to the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
*
* @param[in] srcPath the source prim
* @param[in] srcAttrs a span of attributes to be copied.
* @param[in] dstPath the destination prim
*/
void copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath);
/**
* @brief Copy the specified attributes from the source prim to the specified
* attributes on the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
* Note: The srcAttrs and dstAttrs must be the same size as the function assumes
* that the copy is 1 to 1 in terms of name alignment
*
* @param[in] srcPath the source prim
* @param[in] srcAttrs a span of attributes to be copied.
* @param[in] dstPath the destination prim
* @param[in] dstAttrs a span of attributes to be copied.
*/
void copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath, const gsl::span<Token>& dstAttrs);
StageInProgressId getId() const
{
return m_stageInProgress;
}
/**
* @brief Check whether a prim exists at a given path
* @param[in] path the path
* @return true if a prim exists at the path
*/
bool primExists(const Path& path);
// If StageInProgress was created from an Id, then do nothing
// Else unlock the current sim frame, allowing it to be read by
// other threads
~StageInProgress();
};
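// Illustrative usage sketch (the prim path, attribute name and float type
// are assumptions for the example):
//
//   StageInProgress stage(stageWithHistory, /*simFrameNumber*/ 0);
//   stage.createPrim(Path("/World/Cube"));
//   if (float* size = stage.getAttribute<float>(Path("/World/Cube"), Token("size")))
//   {
//       *size = 2.0f; // nullptr would mean the attribute does not exist yet
//   }
//   // ~StageInProgress() unlocks the frame so other threads can read it.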
// The following two classes, StageAtTime and StageAtTimeInterval
// are used by reader threads to read the history. StageAtTime is
// used when the state of a stage is needed at a particular point in time.
// StageAtTimeInterval is used when we need all the stage history in a given time
// window.
//
// There can be multiple threads reading the history buffer, for example
// multiple sensor renderers running at different rates. We use shared locks
// to allow multiple threads to read the same frame of history.
//
// StageAtTimeInterval takes an RAII approach to locking, constructing one locks
// a range of slots for reading, and destructing unlocks them.
class StageAtTimeInterval
{
StageAtTimeIntervalId m_stageAtTimeInterval;
static carb::flatcache::IStageAtTimeInterval* sIStageAtTimeInterval();
public:
// The constructor locks frames of history
StageAtTimeInterval(StageWithHistory& stageWithHistory,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime = false);
StageAtTimeInterval(StageWithHistoryId stageWithHistoryId,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime = false);
ValidMirrors getAttributeValidBits(const PathC& path, const TokenC& attrName) const;
// Get values of locked elements
template <typename T>
std::vector<const T*> getAttributeRd(const Path& path, const Token& attrName) const;
// Get GPU pointer and size of locked elements
template <typename T>
std::vector<const T*> getAttributeRdGpu(const Path& path, const Token& attrName) const;
// Get the size of an array attribute. When writing CPU code, it isn't
// normally necessary to use this method, as getArrayAttributeRd returns a
// span containing the data pointer and the size.
// However, when writing mixed CPU/GPU code it is wasteful to copy the
// array data from GPU to CPU when just the size is required, so use this
// method in that case.
std::vector<size_t> getArrayAttributeSize(const Path& path, const Token& attrName) const;
/**
* @brief Get an array-valued attribute for reading from a single prim
*
* @param path The path of the prim
* @param attrName The name of the attribute
*
* @return a vector of array spans, one for each time sample within the current StageAtTimeInterval
*/
template <typename T>
std::vector<gsl::span<const T>> getArrayAttributeRd(const Path& path, const Token& attrName) const;
/**
* @brief Get an array-valued attribute as bytes for reading from a single prim.
* This is useful for converting to VtValue
*
* @param path The path of the prim
* @param attrName The name of the attribute
*
* @return a vector of array spans, one for each time sample within the
* current StageAtTimeInterval
*/
std::vector<ConstArrayAsBytes> getArrayAttributeRawRd(const Path& path, const Token& attrName) const;
// Get timestamps of locked elements
std::vector<RationalTime> getTimestamps() const;
size_t getTimeSampleCount() const;
// findPrims() finds prims that have all the attributes in "all", and any
// of the attributes in "any", and none of the attributes in "none".
// The attributes of the resulting prims can be accessed as piecewise
// contiguous arrays, using getAttributeArray() below, which is typically
// faster than calling getAttribute for each prim.
PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any = {},
const carb::flatcache::set<AttrNameAndType>& none = {});
// getAttributeArray(primBucketList, index, attrName) returns for each
// timesample, a read-only, contiguous array of the values of attribute
// "attrName" for each prim of bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
template <typename T>
std::vector<gsl::span<const T>> getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
std::vector<gsl::span<const T>> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
std::vector<std::vector<gsl::span<const T>>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
/**
* @brief Read a raw byte representation for a given attribute from a given bucket. This is useful for doing things such as batched type conversions.
*
* @param primBucketList the list of buckets
* @param primBucketListIndex the specific bucket to search
* @param attrName the token describing the desired attribute
*
* @return a vector of byte arrays, one for each time sample within the current StageAtTimeInterval
*/
std::vector<gsl::span<const char>> getAttributeArrayRawRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getPathArray(primBucketList, index) returns for each timesample a
// read-only contiguous array of the paths of the prims of bucket "index"
// of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
// The reason a separate path array is returned per sample is that prims
// can be added and deleted from frame to frame, and we need to check which
// prim a sample corresponds to when interpolating.
std::vector<gsl::span<const Path>> getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const;
/**
* @brief Get a Connection on a target prim
*
* @param path the target prim
* @param connectionName the connection name
* @return a vector of read only pointers to connections
*/
std::vector<const Connection*> getConnectionRd(const Path& path, const Token& connectionName);
// TODO: replace with an iterator for iterating over bucket names
void printBucketNames() const;
/**
* @brief write the current data for this stageInProgress to the specified UsdStage
* this will write all attributes at the currentTime in getFrameNumber()
*
* @param usdStageId Valid usdStage in the stage cache
*
* @return none
*/
void exportUsd(UsdStageId usdStageId) const;
// Get the number of attributes for a given bucket.
std::vector<size_t> getAttributeCounts(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
// Get the name and type of each attribute for a given bucket.
std::pair< std::vector<std::vector<Token>>, std::vector<std::vector<Type>>>
getAttributeNamesAndTypes(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
/** @brief Write a cache file to disk at a specified location
*
* @note Many parameters to this function are optional
* @note This currently only writes the first time in the interval
*
* @param[in] file The location the file is desired to be written to
* @param[in] workingBuffer [Optional] In order to avoid costly reallocations
* the code will attempt to re-use the memory at the buffer
* location if it is large enough. If the buffer isn't large
* enough the cost of allocation and re-traversal may be paid
* @param[in] workingBufferSize [Optional] If workingBuffer is non null, then this describes the length
* of the buffer
* @return The amount of data needed to serialize the cache; a return value of 0 indicates an error
*
*/
uint64_t writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const;
/** @brief Add a ref count to any data backed by the StageAtTimeInterval
*
* @note The ref count will not enforce any behavior currently, but will
* print a warning if backing data is deleted before all ref counts
* are cleared
*
* @return None
*
*/
void addRefCount();
/** @brief Remove a ref count from an existing timeInterval
*
* @return True if ref count was removed successfully, failure conditions may
* include
* (1) StageAtTimeInterval doesn't exist
* (2) RefCount was already 0
*/
bool removeRefCount();
/** @brief Query ref count for a stage at time
*
* @note A stage at time might be represented by multiple actual data sources
* in that case we return the largest refcount of all the data sources
*
* @return number of reference counts
*/
unsigned int getRefCount();
// Unlocks elements to allow them to be reused.
~StageAtTimeInterval();
};
// StageAtTime is used when the state of a stage is needed at
// a particular point in time, which may or may not be one of the times sampled
// in the history. If it is, then getAttributeRd returns the exact value sampled.
// If not, it linearly interpolates using the two closest samples in the history.
//
// StageAtTime takes an RAII approach to locking, constructing one
// locks one or two frames in the history (depending on whether interpolation
// is needed), and destructing unlocks them.
class StageAtTime
{
// Invariants:
// I0: if sampleTimes.size()==2, m_theta = (m_time - sampleTimes[0]) /
// (sampleTimes[1] - sampleTimes[0])
// where sampleTimes = m_historyWindow.getTimestamps()
//
// In particular, m_theta increases linearly from 0 to 1 as m_time
// increases from sampleTimes[0] to sampleTimes[1]
//
// TODO: do we need to delay conversion from rational number to double?
StageAtTimeInterval m_historyWindow;
RationalTime m_time;
double m_theta;
void initInterpolation()
{
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
if (sampleTimes.size() == 2)
{
if ((double)sampleTimes[0].denominator == 0.0 || (double)sampleTimes[1].denominator == 0.0)
{
CARB_LOG_WARN_ONCE("StageWithHistory initInterpolation(): cannot divide by a denominator with a value of zero.");
m_theta = 0.0;
}
else
{
double a_t = (double)sampleTimes[0].numerator / (double)sampleTimes[0].denominator;
double b_t = (double)sampleTimes[1].numerator / (double)sampleTimes[1].denominator;
if (a_t == b_t)
m_theta = 0.0;
else
{
double c_t = (double)m_time.numerator / (double)m_time.denominator;
m_theta = (c_t - a_t) / (b_t - a_t);
}
}
}
else if (sampleTimes.size() == 1)
m_theta = 0.0;
}
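// Worked example (illustrative): with history samples at times 1/2 and 3/4
// and m_time = 5/8, m_theta = (0.625 - 0.5) / (0.75 - 0.5) = 0.5, i.e. the
// reader interpolates exactly halfway between the two frames.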
public:
// Locks one or two history elements for read.
StageAtTime(StageWithHistory& stageWithHistory, RationalTime time)
: m_historyWindow(stageWithHistory, time, time, true), m_time(time)
{
initInterpolation();
}
StageAtTime(StageWithHistoryId stageWithHistoryId, RationalTime time)
: m_historyWindow(stageWithHistoryId, time, time, true), m_time(time)
{
initInterpolation();
}
// Auxiliary method to communicate attributes of types which will not be interpolated
// Supported types: bool, int, uint
// no samples found: return nullopt
// samples found: return pair{value of sample in frame n, value of sample in frame n+1}
template <typename T>
optional<std::pair<optional<T>, optional<T>>> getNonInterpolatableAttributeRd(const Path& path, const Token& attrName) const;
ValidMirrors getAttributeValidBits(const PathC& path, const TokenC& attrName) const;
// Read interpolated elements
template <typename T>
optional<T> getAttributeRd(const Path& path, const Token& attrName) const;
// Read GPU elements (interpolation not supported yet!)
template <typename T>
const T* getAttributeRdGpu(const Path& path, const Token& attrName) const;
// Get array attribute size, useful for GPU attributes
size_t getArrayAttributeSize(const Path& path, const Token& attrName) const;
// Get an array attribute for reading
template <typename T>
gsl::span<const T> getArrayAttributeRd(const Path& path, const Token& attrName);
// findPrims() finds prims that have all the attributes in "all", and any
// of the attributes in "any", and none of the attributes in "none".
// The attributes of the resulting prims can be accessed as piecewise
// contiguous arrays, using getAttributeArray() below, which is typically
// faster than calling getAttribute for each prim.
PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any = {},
const carb::flatcache::set<AttrNameAndType>& none = {})
{
return m_historyWindow.findPrims(all, any, none);
}
// getAttributeArray(primBucketList, index, attrName) returns a read-only
// contiguous array of the values of attribute "attrName" for each prim of
// bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
template <typename T>
AttributeArrayResult<T> getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
AttributeArrayResult<T> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
AttributeArrayResult<std::vector<T>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
/**
* @brief Read a raw byte representation for a given attribute from a given bucket. This is useful for doing things such as batched type conversions.
*
* @param primBucketList the list of buckets
* @param primBucketListIndex the specific bucket to search
* @param attrName the token describing the desired attribute
*
* @return a vector of byte arrays, one for each time sample underlying the current StageAtTime. Note: Does not perform any interpolation.
*/
std::vector<gsl::span<const char>> getAttributeArrayRawRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getPathArray(primBucketList, index) returns a read-only contiguous array
// of the paths of the prims of bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
gsl::span<const Path> getPathArray(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
/**
* @brief Get a read only pointer to a connection on a prim
*
* @param path the target prim
* @param connectionName the target connection
* @return a vector of read only pointers to connections, one per time sample
*/
std::vector<const Connection*> getConnectionRd(const Path& path, const Token& connectionName);
// TODO: replace with an iterator for iterating over bucket names
void printBucketNames() const;
// Get the number of attributes for a given bucket.
size_t getAttributeCount(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
// Get the name and type of each attribute for a given bucket.
std::pair< std::vector<Token>, std::vector<Type>>
getAttributeNamesAndTypes(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
// Unlocks elements to allow them to be reused.
~StageAtTime() = default;
/** @brief Write a cache file to disk at a specified location
*
* @note Many parameters to this function are optional
*
* @param[in] file The location the file is desired to be written to
* @param[in] workingBuffer [Optional] In order to avoid costly reallocations
* the code will attempt to re-use the memory at the buffer
* location if it is large enough. If the buffer isn't large
* enough the cost of allocation and re-traversal may be paid
* @param[in] workingBufferSize [Optional] If workingBuffer is non null, then this describes the length
* of the buffer
* @return The amount of data needed to serialize the cache; a return value of 0 indicates an error
*
*/
uint64_t writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const;
/** @brief Add a ref count to any data backed by the StageAtTimeInterval
*
* @note The ref count will not enforce any behavior currently, but will
* print a warning if backing data is deleted before all ref counts
* are cleared
*
* @return None
*
*/
void addRefCount();
/** @brief Remove a ref count from an existing timeInterval
*
* @return True if ref count was removed successfully, failure conditions may
* include
* (1) StageAtTimeInterval doesn't exist
* (2) RefCount was already 0
*/
bool removeRefCount();
/** @brief Query ref count for a stage at time
*
* @note A stage at time might be represented by multiple actual data sources
* in that case we return the largest refcount of all the data sources
*
* @return number of reference counts
*/
unsigned int getRefCount();
};
// Finally, here is the main class, StageWithHistory.
class StageWithHistory
{
StageWithHistoryId m_stageWithHistory;
UsdStageId m_usdStageId;
friend class StageInProgress;
friend class StageAtTimeInterval;
public:
StageWithHistory(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda=false);
~StageWithHistory();
/**
* Create a listener
* This just creates a listener ID, you have to attach it to a stage to use it.
* Note that there is no destroyListener method. To stop using an ID, detach it from all stages it is attached to.
* @return The listener
*/
ListenerId createListener();
};
const ListenerId kInvalidListenerId = { 0 };
} // namespace flatcache
} // namespace carb
// Implement above C++ methods by calling C-ABI interfaces
#include "WrapperImpl.h"
| 41,215 |
C
| 41.799585 | 153 | 0.664855 |
omniverse-code/kit/fabric/include/carb/flatcache/USDValueAccessors.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "UsdPCH.h"
#include <carb/Defines.h>
#include <carb/InterfaceUtils.h>
#include <carb/Types.h>
#include <carb/flatcache/FlatCacheUSD.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/PathToAttributesMap.h>
#include <carb/flatcache/StageWithHistory.h>
#include <iostream>
#include <vector>
namespace carb
{
namespace flatcache
{
// A TfNotice sent with a vector of paths for attributes that
// have changed. Sent by StageInProgress upon request; contains
// the paths of attributes that the StageInProgress has flagged
// as modified during its lifetime, which is typically one frame.
//
// primPaths and attributeNames are required to be the same length;
// each pair gives the prim and the attribute name within that prim
// whose value changed.
class AttributeValuesChangedNotice : public pxr::TfNotice
{
public:
AttributeValuesChangedNotice(const std::vector<pxr::SdfPath>& primPaths,
const std::vector<pxr::TfToken>& attributeNames)
: _primPaths(primPaths), _attributeNames(attributeNames)
{
}
~AttributeValuesChangedNotice()
{
}
const std::vector<pxr::SdfPath>& GetPrimPaths() const
{
return _primPaths;
}
const std::vector<pxr::TfToken>& GetAttributeNames() const
{
return _attributeNames;
}
private:
const std::vector<pxr::SdfPath> _primPaths;
const std::vector<pxr::TfToken> _attributeNames;
};
void broadcastTfNoticeForAttributesChanged(StageInProgressId stageInProgressId);
template <typename T>
T getValue(const pxr::UsdAttribute& attribute, const pxr::UsdTimeCode& timeCode)
{
// First, look in flatcache to see if a value is present. If not, fall back
// to read USD's composed attribute value.
{
// read from flatcache via StageInProgress; this is called during a run
// loop where extensions are modifying one timeslice within StageWithHistory
// Look up the long int identifier for the attribute's UsdStage
auto usdStageId = PXR_NS::UsdUtilsStageCache::Get().GetId(attribute.GetStage()).ToLongInt();
// grab the carb interface for StageInProgress and use it to access the
// (potentially NULL) current stageInProgress for the UsdStage
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
auto stageInProgress = iStageInProgress->get(usdStageId);
if (stageInProgress.id)
{
// Grab a pointer to in-memory representation for the attribute value, in this
// case a pointer to a T. Will be NULL if attribute doesn't exist in flatcache
auto valueSpan =
iStageInProgress->getAttribute(stageInProgress, carb::flatcache::asInt(attribute.GetPrimPath()),
carb::flatcache::asInt(attribute.GetName()));
T* valuePtr = (T*)valueSpan.ptr;
if (valuePtr)
{
// We have a value stored for this attribute in flatcache, return it
return *valuePtr;
}
}
}
// If we get here we didn't find a value stored for this attribute in flatcache,
// so call USD API
pxr::VtValue val;
attribute.Get(&val, timeCode);
return val.UncheckedGet<T>();
}
template <typename T_VALUETYPE>
void setFlatCacheValue(const pxr::UsdAttribute& attribute, T_VALUETYPE value, bool writeToUSD)
{
if (writeToUSD)
{
// write to the USD layer
attribute.Set(value);
}
else
{
// write to flatcache, via StageInProgress
// grab const references to the path of the attribute's parent
// prim and the name of the attribute. Avoid copies here.
const pxr::SdfPath& path = attribute.GetPrimPath();
const pxr::TfToken& name = attribute.GetName();
// Convert the bits into a carb-safe value
auto pathId = carb::flatcache::asInt(path);
auto nameId = carb::flatcache::asInt(name);
// Look up the long int identifier for the attribute's UsdStage
auto usdStageId = carb::flatcache::UsdStageId{
(uint64_t)PXR_NS::UsdUtilsStageCache::Get().GetId(attribute.GetStage()).ToLongInt()
};
// grab the carb interface for StageInProgress and use it to access the
// (potentially NULL) current stageInProgress for the UsdStage
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
auto stageInProgress = iStageInProgress->get(usdStageId);
if (!stageInProgress.id)
{
// No one created a stageInProgress; we're expecting this
// to be created by another extension or run loop
//
// XXX: warn, or return false?
return;
}
// Grab a pointer to the in-memory representation of the attribute value, in this
// case a pointer to a T_VALUETYPE
auto valuePtr = iStageInProgress->getAttribute(stageInProgress, pathId, nameId);
// Set the value within stageInProgress
((T_VALUETYPE*)valuePtr.ptr)[0] = value;
}
}
// This should be in UsdValueAccessors.cpp, but when it goes there
// clients in DriveSim can't find the symbol. Needs fixing.
inline void setFlatCacheValueFloat(const pxr::UsdAttribute& attribute, float value, bool writeToUSD)
{
setFlatCacheValue<float>(attribute, value, writeToUSD);
}
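// Illustrative sketch (not part of the original header): writing via the
// helper above and reading the value back. Note that getValue() prefers the
// flatcache copy, so a value stored in flatcache shadows one authored to USD.
inline float exampleWriteThenRead(const pxr::UsdAttribute& attribute, float value, bool writeToUSD)
{
    // writeToUSD == false requires a StageInProgress to already exist
    setFlatCacheValue<float>(attribute, value, writeToUSD);
    return getValue<float>(attribute, pxr::UsdTimeCode::Default());
}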
}
}
| 5,939 |
C
| 33.941176 | 112 | 0.675535 |
omniverse-code/kit/fabric/include/carb/flatcache/IFlatcache.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "IPath.h"
#include <carb/Interface.h>
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IdTypes.h>
#include <carb/flatcache/Ordered_Set.h>
#include <carb/flatcache/RationalTime.h>
#include <map>
#include <stdint.h>
namespace carb
{
namespace flatcache
{
struct UsdStageId
{
uint64_t id;
constexpr bool operator<(const UsdStageId& other) const
{
return id < other.id;
}
constexpr bool operator==(const UsdStageId& other) const
{
return id == other.id;
}
constexpr bool operator!=(const UsdStageId& other) const
{
return id != other.id;
}
};
static_assert(std::is_standard_layout<UsdStageId>::value,
"Struct must be standard layout as it is used in C-ABI interfaces");
static constexpr UsdStageId kUninitializedStage{ 0 };
}
}
namespace std
{
template <>
class hash<carb::flatcache::UsdStageId>
{
public:
size_t operator()(const carb::flatcache::UsdStageId& key) const
{
return key.id;
}
};
}
namespace carb
{
namespace flatcache
{
struct BucketId
{
uint64_t id;
constexpr bool operator<(const BucketId& other) const
{
return id < other.id;
}
constexpr bool operator<=(const BucketId& other) const
{
return id <= other.id;
}
constexpr bool operator==(const BucketId& other) const
{
return id == other.id;
}
constexpr bool operator!=(const BucketId& other) const
{
return id != other.id;
}
constexpr BucketId& operator++()
{
++id;
return *this;
}
constexpr BucketId& operator--()
{
--id;
return *this;
}
constexpr explicit operator size_t() const
{
return id;
}
};
static_assert(std::is_standard_layout<BucketId>::value,
"Struct must be standard layout as it is used in C-ABI interfaces");
static constexpr BucketId kInvalidBucketId{ 0xffff'ffff'ffff'ffff };
// A struct that represents a subset of a bucket
struct BucketSubset
{
BucketId bucket; // The target bucket from which we define a subset
set<TokenC>* attributes; // The subset of attributes to consider - only used if allAttributes == false, in which case it MUST be set
set<PathC>* paths; // The subset of paths to consider - only used if allPaths == false, in which case it MUST be set
bool allAttributes; // True to consider all attributes, false to filter by `attributes`
bool allPaths; // True to consider all paths, false to filter by `paths`
};
static_assert(std::is_standard_layout<BucketSubset>::value,
"BucketSubset must be standard layout as it is used in C-ABI interfaces");
}
}
namespace std
{
template <>
class hash<carb::flatcache::BucketId>
{
public:
size_t operator()(const carb::flatcache::BucketId& key) const
{
return key.id;
}
};
}
namespace carb
{
namespace flatcache
{
// Flatcache stores data in untyped (byte) arrays.
// For conversion back to typed arrays, getArraySpan methods return the
// element size in bytes. They also return elementCount to allow the caller to
// wrap the array in std::span, or bounds check array access themselves.
// Flatcache methods can't return std::span or gsl::span directly, because they
// are not C-ABI compatible. So we define SpanC/ConstSpanC, which are.
struct ConstSpanC
{
const uint8_t* ptr;
size_t elementCount;
size_t elementSize;
};
struct SpanC
{
uint8_t* ptr;
size_t elementCount;
size_t elementSize;
// Casting SpanC to ConstSpanC is allowed, but not vice versa
operator ConstSpanC() const
{
return { ptr, elementCount, elementSize };
}
};
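// Illustrative sketch (not part of the original header): recovering typed
// access from a SpanC. The elementSize field lets callers sanity-check the
// cast before using the pointer.
template <typename T>
inline T* exampleSpanAsTyped(const SpanC& span)
{
    return (span.elementSize == sizeof(T)) ? reinterpret_cast<T*>(span.ptr) : nullptr;
}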
struct ConstSpanWithTypeC
{
const uint8_t* ptr;
size_t elementCount;
size_t elementSize;
TypeC type;
};
struct SpanWithTypeC
{
uint8_t* ptr;
size_t elementCount;
size_t elementSize;
TypeC type;
// Casting SpanWithTypeC to ConstSpanWithTypeC is allowed, but not vice versa
operator ConstSpanWithTypeC() const
{
return { ptr, elementCount, elementSize, type };
}
};
struct SpanSizeC
{
size_t* ptr;
size_t elementCount;
};
struct ConstSpanSizeC
{
const size_t* ptr;
size_t elementCount;
};
// An ArrayPointersAndSizesC is an array of immutably sized mutable
// data arrays
//
// Rules (enforced by const):
// {
// ArrayPointersAndSizesC ps;
//
// // Allowed: Changing inner array values
// ps.arrayPtrs[0][0] = 1;
//
// // Disallowed: Changing array pointers
// ps.arrayPtrs[0] = (uint8_t*)p;
//
// // Disallowed: Changing inner array sizes
// ps.sizes[0] = 1;
// }
struct ArrayPointersAndSizesC
{
uint8_t* const* arrayPtrs;
const size_t* sizes;
const size_t elementCount;
};
// A ConstArrayPointersAndSizesC is an array of immutably sized immutable
// data arrays
//
// Rules (enforced by const):
// {
// ConstArrayPointersAndSizesC ps;
//
// // Disallowed: Changing inner array values
// ps.arrayPtrs[0][0] = 1;
//
// // Disallowed: Changing array pointers
// ps.arrayPtrs[0] = (uint8_t*)p;
//
// // Disallowed: Changing inner array sizes
// ps.sizes[0] = 1;
// }
struct ConstArrayPointersAndSizesC
{
const uint8_t* const* arrayPtrs;
const size_t* sizes;
size_t elementCount;
};
static_assert(std::is_standard_layout<Path>::value, "Path must be standard layout as it is used in C-ABI interfaces");
struct ConstPathCSpan
{
const Path* ptr;
size_t elementCount;
};
struct ConstAttrNameAndTypeSpanC
{
const AttrNameAndType* ptr;
size_t elementCount;
};
struct ConstChangedIndicesC
{
bool allIndicesChanged;
ConstSpanSizeC changedIndices;
};
struct ConstChangedIndicesSpanC
{
const ConstChangedIndicesC* ptr;
size_t elementCount;
};
struct BucketChangesC
{
// Which attributes changed
flatcache::ConstAttrNameAndTypeSpanC changedAttributes;
// For each attribute, which prims changed?
flatcache::ConstChangedIndicesSpanC changedIndices;
flatcache::ConstPathCSpan pathArray;
};
struct AddedPrimIndicesC
{
// Which prims were added?
flatcache::ConstSpanSizeC addedIndices;
};
struct StageWithHistorySnapshot
{
bool valid;
size_t id;
};
enum class ValidMirrors
{
eNone = 0,
eCPU = 1,
eCudaGPU = 2,
eGfxGPU = 4
};
constexpr enum ValidMirrors operator|(const enum ValidMirrors a, const enum ValidMirrors b)
{
return (enum ValidMirrors)(uint32_t(a) | uint32_t(b));
}
constexpr enum ValidMirrors operator&(const enum ValidMirrors a, const enum ValidMirrors b)
{
return (enum ValidMirrors)(uint32_t(a) & uint32_t(b));
}
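// Illustrative sketch (not part of the original header): ValidMirrors is a
// bitmask, so membership is tested by masking and comparing against eNone.
inline bool exampleHasCudaMirror(ValidMirrors mirrors)
{
    return (mirrors & ValidMirrors::eCudaGPU) != ValidMirrors::eNone;
}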
using PrimBucket = carb::flatcache::set<AttrNameAndType>;
//
// Note when extending the interface please add to the end so
// that dependencies don't break as easily before they are rebuilt
//
struct IStageInProgress
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageInProgress", 0, 2);
StageInProgressId(CARB_ABI* create)(UsdStageId usdStageId, size_t simFrameNumber);
StageInProgressId(CARB_ABI* get)(UsdStageId usdStageId);
void(CARB_ABI* destroy)(UsdStageId usdStageId);
size_t(CARB_ABI* getFrameNumber)(StageInProgressId stageId);
// Prefetch prim from USD stage
// This guarantees that subsequent gets of the prim from the cache will succeed
void(CARB_ABI* prefetchPrim)(UsdStageId usdStageId, PathC path);
// Get attribute for read/write access
SpanC(CARB_ABI* getAttribute)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get attribute for read only access
ConstSpanC(CARB_ABI* getAttributeRd)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get attribute for write only access
SpanC(CARB_ABI* getAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get attribute for write only access, creating it if necessary
SpanC(CARB_ABI* getOrCreateAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC typeC);
size_t(CARB_ABI* getArrayAttributeSize)(StageInProgressId stageId, PathC path, TokenC attrName);
void(CARB_ABI* setArrayAttributeSize)(StageInProgressId stageId, PathC path, TokenC attrName, size_t elemCount);
SpanC(CARB_ABI* setArrayAttributeSizeAndGet)(StageInProgressId stageId, PrimBucketListId primBucketList,
size_t primBucketListIndex, size_t indexInBucket, TokenC attrName, size_t newElemCount);
// Get an attribute's type
Type(CARB_ABI* getType)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get prim's attribute count
size_t(CARB_ABI* getAttributeCount)(StageInProgressId stageId, PathC path);
// Get the names of a prim's attributes
void(CARB_ABI* getAttributeNamesAndTypes)(Token* outNames,
Type* outTypes,
size_t outCount,
StageInProgressId stageInProgressId, PathC path);
// Attribute/prim create/destroy
void(CARB_ABI* createPrim)(StageInProgressId stageId, PathC path);
void(CARB_ABI* destroyPrim)(StageInProgressId stageId, PathC path);
void(CARB_ABI* createAttribute)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC type);
void(CARB_ABI* createAttributes)(
StageInProgressId stageId, PathC path, TokenC* attrNames, TypeC* types, uint32_t attrNameAndTypeCount);
// Deprecated, as the attribute type argument is not required!
void(CARB_ABI* destroyAttribute)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC type);
// See the newer destroyAttribute2 and destroyAttributes functions at the end of IStageInProgress
// Attribute SOA accessors
PrimBucketListId(CARB_ABI* findPrims)(StageInProgressId stageInProgressId,
const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none);
void(CARB_ABI* getAttributeArray)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayRd)(ConstSpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayWr)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getOrCreateAttributeArrayWr)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName,
TypeC typeC);
size_t(CARB_ABI* getBucketPrimCount)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
size_t(CARB_ABI* getBucketAttributeCount)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex);
void(CARB_ABI* getBucketAttributeNamesAndTypes)(AttrNameAndType* out, size_t outCount,
StageInProgressId stageInProgressId, PrimBucketListId primBucketList,
size_t primBucketListIndex);
ConstSpanSizeC(CARB_ABI* getArrayAttributeSizeArrayRd)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
ArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizes)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
ConstArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizesRd)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
ArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizesWr)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getPathArray)(ConstPathCSpan* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex);
void(CARB_ABI* printBucketNames)(StageInProgressId stageInProgressId);
void(CARB_ABI* createForAllStages)(size_t simFrameNumber);
void(CARB_ABI* destroyForAllStages)();
void(CARB_ABI* logAttributeWriteForNotice)(StageInProgressId stageId, PathC path, TokenC attrName);
// Broadcast a USD TfNotice to all registered listeners containing paths of
// all attributes passed to logAttributeWriteForNotice since this StageInProgress was constructed.
// This is used, for example, to send changes to PhysX.
void(CARB_ABI* broadcastTfNoticeForAttributesChanged)(StageInProgressId stageInProgressId);
PrimBucketListId(CARB_ABI* getChanges)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* popChanges)(StageInProgressId stageInProgressId, ListenerId listenerId);
RationalTime(CARB_ABI* getFrameTime)(StageInProgressId stageId);
/** @brief Get a Span with a pointer to the head of the relevant array of data,
* with elementCount and elementSize reflecting the underlying data
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @attrName[in] Name of the array attribute
*
* @return If the prim/attribute is valid and holds an array, returns a valid span;
* otherwise returns an empty span.
*
*/
SpanC(CARB_ABI* getArrayAttribute)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Get a const Span with a pointer to the head of the relevant array of data,
* with elementCount and elementSize reflecting the underlying data
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @attrName[in] Name of the array attribute
*
* @return If the prim/attribute is valid and holds an array, returns a valid span;
* otherwise returns an empty span.
*
*/
ConstSpanC(CARB_ABI* getArrayAttributeRd)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Get a Span with a pointer to the head of the relevant array of data,
* with elementCount and elementSize reflecting the underlying data
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @attrName[in] Name of the array attribute
*
* @return If the prim/attribute is valid and holds an array, returns a valid span;
* otherwise returns an empty span.
*
*/
SpanC(CARB_ABI* getArrayAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Destroy the attribute with the matching name
*
* Overloads and supersedes destroyAttribute, which takes an unnecessary attribute type.
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @attrName[in] Attribute name
*
*/
void(CARB_ABI* destroyAttribute2)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Destroy all attributes with matching names
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @attrNames[in] Attribute name array
* @attrNameCount[in] Attribute name array count
*
*/
void(CARB_ABI* destroyAttributes)(StageInProgressId stageId, PathC path, TokenC* attrNames, uint32_t attrNameCount);
void(CARB_ABI* getAttributeArrayGpu)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayRdGpu)(ConstSpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayWrGpu)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
// Get GPU attribute for read/write access
SpanC(CARB_ABI* getAttributeGpu)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get GPU attribute for read only access
ConstSpanC(CARB_ABI* getAttributeRdGpu)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get GPU attribute for write only access
SpanC(CARB_ABI* getAttributeWrGpu)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Returns which mirrors of the array are valid: CPU, GPU, etc.
*
* @stageId[in] The stage to query validity from
* @path[in] The prim path
* @attrName[in] The attribute name
*
* @return ValidMirrors struct
*
*/
ValidMirrors(CARB_ABI* getAttributeValidBits)(StageInProgressId stageId, const PathC& path, const TokenC& attrName);
// Connection API
/**
* @brief Create a connection on a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to create the connection on
* @param[in] connectionName the name of the connection attribute
* @param[in] connection the target prim and attribute for the connection
*/
void(CARB_ABI* createConnection)(StageInProgressId stageId, PathC path, TokenC connectionName, Connection connection);
/**
* @brief Create multiple connections on a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to create the connection on
* @param[in] connectionNames the name of the connection attributes to create
* @param[in] connection the target prim and attribute for the connections
* @param[in] connectionCount the number of connections to be created.
*/
void(CARB_ABI* createConnections)(StageInProgressId stageId, PathC path, const TokenC* connectionNames, const Connection* connections, size_t connectionCount);
/**
* @brief remove a connection on a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to remove the connection from
* @param[in] connectionName the name of the connection attribute
*/
void(CARB_ABI* destroyConnection)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Remove multiple connections from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to remove the connections from
* @param[in] connectionNames the name of the connection attributes to be removed
* @param[in] connectionCount the number of connections to be removed.
*/
void(CARB_ABI* destroyConnections)(StageInProgressId stageId, PathC path, const TokenC* connectionNames, size_t connectionCount);
/**
* @brief Retrieves a connection attribute from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to fetch the connection from
* @param[in] connectionName the name of the connection attribute to fetch
* @return a read/write pointer to the connection
*/
Connection*(CARB_ABI* getConnection)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Retrieves a connection attribute from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to fetch the connection from
* @param[in] connectionName the name of the connection attribute to fetch
* @return a read only pointer to the connection
*/
const Connection*(CARB_ABI* getConnectionRd)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Retrieves a connection attribute from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to fetch the connection from
* @param[in] connectionName the name of the connection attribute to fetch
* @return a write only pointer to the connection
*/
Connection*(CARB_ABI* getConnectionWr)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Copy all attributes from the source prim to the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
*
* @param[in] stageId the stage id to use for copying
* @param[in] srcPath the source prim
* @param[in] dstPath the destination prim
*/
void(CARB_ABI* copyAllAttributes)(StageInProgressId stageId, PathC srcPath, PathC dstPath);
/**
* @brief Copy the specified attributes from the source prim to the specified
* attributes on the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
* Note: srcAttrs and dstAttrs must be the same size; the copy is 1-to-1,
* pairing source and destination names by index
*
* @param[in] stageId the stage id to use for copying
* @param[in] srcPath the source prim
* @param[in] srcAttrs the attribute names to copy from
* @param[in] dstPath the destination prim
* @param[in] dstAttrs the attribute names to copy to
* @param[in] count the number of attributes to copy
*/
void(CARB_ABI* copySpecifiedAttributes)(StageInProgressId stageId, PathC srcPath, const TokenC* srcAttrs, PathC dstPath, const TokenC* dstAttrs, size_t count);
};
struct IStageAtTimeInterval
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageAtTimeInterval", 0, 1);
StageAtTimeIntervalId(CARB_ABI* create)(StageWithHistoryId stageWithHistoryId,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime);
void(CARB_ABI* destroy)(StageAtTimeIntervalId stageAtTimeIntervalId);
size_t(CARB_ABI* getTimesampleCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
void(CARB_ABI* getTimestamps)(RationalTime* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId);
// Single attribute accessor
size_t(CARB_ABI* getAttributeRd)(const void** out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PathC path,
TokenC attrName);
// Attribute SOA accessors
PrimBucketListId(CARB_ABI* findPrims)(StageAtTimeIntervalId stageAtTimeIntervalId,
const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none);
void(CARB_ABI* getAttributeArrayRd)(ConstSpanC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getArrayAttributeArrayWithSizesRd)(ConstArrayPointersAndSizesC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getPathArray)(ConstPathCSpan* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex);
void(CARB_ABI* printBucketNames)(StageAtTimeIntervalId stageAtTimeIntervalId);
void(CARB_ABI* exportUsd)(StageAtTimeIntervalId stageAtTimeIntervalId, UsdStageId usdStageId);
RationalTime(CARB_ABI* getSimPeriod)(UsdStageId usdStageId);
void(CARB_ABI* getAttributeCounts)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesamples,
size_t* outCounts);
void(CARB_ABI* getAttributeNamesAndTypes)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesamples,
const size_t* attributeCounts,
Token** outNames,
Type** outTypes);
size_t(CARB_ABI* getAttributeCountForTimesample)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesampleIndex);
void(CARB_ABI* getAttributeNamesAndTypesForTimesample)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesampleIndex,
size_t attributeCount,
Token* outNames,
Type* outTypes);
void(CARB_ABI* getArrayAttributeWithSizeRd)(ConstSpanWithTypeC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
carb::flatcache::PathC path,
carb::flatcache::TokenC attrName);
/** @brief Write a cache file to disk at a specified location
*
* @note Many parameters to this function are optional
* @note This currently only writes the first time in the interval
* @stageAtTimeIntervalId[in] The stage at time to be written to disk
* @file[in] The location the file should be written to
* @workingBuffer[in] [Optional] In order to avoid costly reallocations
* the code will attempt to re-use the memory at the buffer
* location if it is large enough. If the buffer isn't large
* enough, the cost of allocation and re-traversal may be paid
* @workingBufferSize[in] [Optional] If workingBuffer is non-null, then this describes the length
* of the buffer
* @return The amount of data needed to serialize the cache; a return value of 0 indicates an error
*
*/
uint64_t(CARB_ABI* writeCacheToDisk)(
StageAtTimeIntervalId stageAtTimeIntervalId,
const char* file,
uint8_t* workingBuffer,
uint64_t workingBufferSize);
/** @brief Add a ref count to any data backed by the StageAtTimeInterval
*
* @note The ref count will not enforce any behavior currently, but will
* print a warning if backing data is deleted before all ref counts
* are cleared
*
* @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting
*
* @return None
*
*/
void(CARB_ABI* addRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
/** @brief Remove a ref count from an existing timeInterval
*
*
* @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting
*
* @return True if ref count was removed successfully, failure conditions may
* include
* (1) StageAtTimeInterval doesn't exist
* (2) RefCount was already 0
*
*/
bool(CARB_ABI* removeRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
/** @brief Query ref count for a stage at time
*
* @note A stage at time might be represented by multiple actual data sources;
* in that case we return the largest refcount of all the data sources
*
* @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting
*
* @return number of reference counts
*
*/
unsigned int(CARB_ABI* getRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
// Access GPU Array attribute
void(CARB_ABI* getAttributeArrayRdGpu)(ConstSpanC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
// Access GPU pointer attribute
void(CARB_ABI* getAttributeRdGpu)(ConstSpanC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PathC path,
TokenC attrName);
// Get array size, useful for GPU attributes
size_t(CARB_ABI* getArrayAttributeSize)(size_t* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId, PathC path, TokenC attrName);
/** @brief Returns which mirrors of the array are valid: CPU, GPU, etc.
*
* @stageAtTimeIntervalId[in] The stage to query validity from
* @path[in] The prim path
* @attrName[in] The attribute name
*
* @return ValidMirrors struct
*
*/
ValidMirrors(CARB_ABI* getAttributeValidBits)(StageAtTimeIntervalId stageAtTimeIntervalId, const PathC& path, const TokenC& attrName);
/**
* @brief Read a connection attribute at each timesample in the interval
*
*/
void(CARB_ABI* getConnectionRd)(const void** out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PathC path,
TokenC connectionName);
};
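// Illustrative sketch (not part of the original interface): enumerating the
// timesamples covered by an interval. Assumes <vector> and
// carb/InterfaceUtils.h (for getCachedInterface) are available at the point
// of use.
inline std::vector<RationalTime> exampleTimestamps(StageAtTimeIntervalId intervalId)
{
    auto* iInterval = carb::getCachedInterface<IStageAtTimeInterval>();
    std::vector<RationalTime> times(iInterval->getTimesampleCount(intervalId));
    iInterval->getTimestamps(times.data(), times.size(), intervalId);
    return times;
}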
struct IStageWithHistory
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageWithHistory", 0, 1);
StageWithHistoryId(CARB_ABI* create)(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod);
StageWithHistoryId(CARB_ABI* get)(UsdStageId usdStageId);
void(CARB_ABI* destroy)(UsdStageId usdStageId);
//
// Create a snapshot of the stageWithHistory for the usdStageId. This currently just resets
// the stage in progress, but it probably should be extended to copy the entire ring buffer if we intend to
// do anything other than reset to the start frame.
//
StageWithHistorySnapshot(CARB_ABI* saveSnapshot)(UsdStageId usdStageId);
bool(CARB_ABI* deleteSnapshot)(UsdStageId usdStageId, size_t snapshotId);
bool(CARB_ABI* restoreFromSnapshot)(UsdStageId usdStageId, size_t snapshotId);
RationalTime(CARB_ABI* getSimPeriod)(UsdStageId usdStageId);
// For multi-process replication. Stores the link between the stage id on the master process and the local stage id.
void(CARB_ABI* setStageIdMapping)(UsdStageId usdStageIdMaster, UsdStageId usdStageIdLocal);
ListenerId(CARB_ABI* createListener)();
/** @brief Get the last frame that was written to the StageWithHistory
*
* @usdStageId[in] The identifier for the stage
*
* @return the time and period of the last valid data written to the StageWithHistory
*
*/
RationalTime(CARB_ABI* getLatestFrame)(UsdStageId usdStageId);
StageWithHistoryId(CARB_ABI* create2)(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda);
UsdStageId(CARB_ABI* getLocalStageId)(UsdStageId usdStageIdMaster);
};
struct IStageWithHistoryDefaults
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageWithHistoryDefaults", 0, 1);
void(CARB_ABI* setStageHistoryFrameCount)(size_t historyFrameCount);
void(CARB_ABI* setStageHistoryUpdatePeriod)(uint64_t periodNumerator, uint64_t periodDenominator);
};
struct IPrimBucketList
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IPrimBucketList", 0, 2);
void(CARB_ABI* destroy)(PrimBucketListId primBucketListId);
size_t(CARB_ABI* getBucketCount)(PrimBucketListId primBucketListId);
void(CARB_ABI* print)(PrimBucketListId primBucketListId);
BucketChangesC(CARB_ABI* getChanges)(PrimBucketListId changeListId, size_t index);
AddedPrimIndicesC(CARB_ABI* getAddedPrims)(PrimBucketListId changeListId, size_t index);
};
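// Illustrative sketch (not part of the original interface): doubling a float
// attribute on every prim that has it, using findPrims plus the SOA accessors
// above. Assumes carb/InterfaceUtils.h for getCachedInterface; "withRadius"
// is a caller-built query set whose attribute named radiusName is a float.
inline void exampleDoubleRadii(StageInProgressId stageId,
                               TokenC radiusName,
                               const set<AttrNameAndType>& withRadius)
{
    auto* iStage = carb::getCachedInterface<IStageInProgress>();
    auto* iBucketList = carb::getCachedInterface<IPrimBucketList>();
    const set<AttrNameAndType> none;
    PrimBucketListId buckets = iStage->findPrims(stageId, withRadius, none, none);
    const size_t bucketCount = iBucketList->getBucketCount(buckets);
    for (size_t b = 0; b != bucketCount; b++)
    {
        SpanC values{};
        iStage->getAttributeArray(&values, stageId, buckets, b, radiusName);
        float* radii = reinterpret_cast<float*>(values.ptr);
        for (size_t i = 0; i != values.elementCount; i++)
            radii[i] *= 2.0f;
    }
    iBucketList->destroy(buckets);
}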
struct IChangeTrackerConfig
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IChangeTrackerConfig", 0, 3);
void(CARB_ABI* pause)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* resume)(StageInProgressId stageInProgressId, ListenerId listenerId);
bool(CARB_ABI* isChangeTrackingPaused)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* attributeEnable)(StageInProgressId stageInProgressId, TokenC attrName, ListenerId listenerId);
void(CARB_ABI* attributeDisable)(StageInProgressId stageInProgressId, TokenC attrName, ListenerId listenerId);
bool(CARB_ABI* isListenerAttached)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* detachListener)(StageInProgressId stageInProgressId, ListenerId listenerId);
size_t(CARB_ABI* getListenerCount)(StageInProgressId stageInProgressId);
void(CARB_ABI* enablePrimCreateTracking)(StageInProgressId stageInProgressId, ListenerId listenerId);
};
/** @brief The Serializer interface provides the C-ABI compatible functions for
* working with all serialization of SWH and workflows. This covers
* (1) In-memory serialization/deserialization
* (2) Serialization to and from disk
* (3) Functions to support replication based on serialization
* Because of the nature of SWH there are multiple places one might want to
* serialize the cache from, so we provide convenience functions
* that wrap this up, as well as the direct functionality to serialize a
* PathToAttributesMap to/from a buffer.
*
*/
struct ISerializer
{
CARB_PLUGIN_INTERFACE("carb::flatcache::ISerializer", 0, 2);
//
// deprecated for more appropriately named serializeRingBuffer
//
uint64_t(CARB_ABI* serializeStage)(StageWithHistoryId stageWithHistoryId, size_t slot, uint8_t* dest, size_t destSize);
//
// deprecated for more appropriately named deserializeIntoRingBuffer
//
bool (CARB_ABI* deserializeStage)(StageWithHistoryId stageWithHistoryId, size_t slot, const uint8_t* input, const size_t inputSize,
size_t simFrameNumber, carb::flatcache::RationalTime simFrameTime);
/** @brief Attempt to serialize the stage into the provided buffer. This function
* is intended to be used when you want to serialize all the data within a
* ring buffer entry; however, this is often more data than needs to be sent.
*
* @stage[in] The StageWithHistory with the ring buffer to be serialized
* @slot[in] The slot from the ring buffer to send
* @dest[in/out] Pointer to the buffer to be written to; writing starts at the head
* of the pointer
* @destSize[in] Size of the buffer that was allocated for the data (in bytes)
*
* @return Number of bytes written; success is determined by (return <= destSize)
*
*
* @invariant It is safe to write to any memory within [dest, dest+destSize] for the
* duration of the function call.
*
* @note If the cache will not fit into the size of memory allocated in
* @dest then it will stop writing, but continue to run the serialize
* algorithm to calculate the actual amount of data that needs to be
* written
*
*/
uint64_t(CARB_ABI* serializeRingBuffer)(StageWithHistoryId stageWithHistoryId, size_t slot, uint8_t* dest, size_t destSize);
/** @brief Given a buffer that has the serialized version of a cache written
* using the serialize function, this function will override all the data
* in the ringbuffer at the requested slot with the data encoded in the
* buffer. This function will only succeed if the StageWithHistory that
* is passed in was created from the same UsdStage (opened at the same root layer)
* that was used to create the original serialized cache.
*
*
* @stageWithHistoryId[in] The stage to write the data to
* @slot[in] The index in the ring buffer to pull to
* @input[in] Pointer to buffer of data containing serialized cache
* @inputSize[in] Size of data in the buffer
* @simFrameNumber[in] The frame of the simulation to set the ring buffer entry to
* @simFrameTime[in] The simFrame time to set the ring buffer to
*
* @return True if buffer was successfully de-serialized
*
* @TODO: should we care that it came from the same version of the USD file?
*/
bool (CARB_ABI* deserializeIntoRingBuffer)(StageWithHistoryId stageWithHistoryId, size_t slot, const uint8_t* input, const size_t inputSize,
size_t simFrameNumber, carb::flatcache::RationalTime simFrameTime);
/** @brief Replicate the ring buffers from the master to the workers when running
* multiple processes. Data is serialized into buffers allocated and broadcast
* by Realm, followed by deserialization into the remote ring buffers. This
* function is synchronous, i.e., the remote FlatCaches have finished updating
* when this function returns.
*/
void (CARB_ABI* replicateRingBuffers)();
};
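// Illustrative sketch (not part of the original interface): the two-call
// sizing pattern implied by the serializeRingBuffer note above. Assumes
// <vector> and carb/InterfaceUtils.h are available at the point of use.
inline bool exampleSerializeSlot(StageWithHistoryId stage, size_t slot, std::vector<uint8_t>& out)
{
    auto* iSer = carb::getCachedInterface<ISerializer>();
    // First pass: may not fit, but still reports the required byte count
    const uint64_t needed = iSer->serializeRingBuffer(stage, slot, out.data(), out.size());
    if (needed <= out.size())
    {
        out.resize(needed);
        return true; // it fit on the first try
    }
    out.resize(needed);
    // Second pass with a buffer guaranteed to be large enough
    return iSer->serializeRingBuffer(stage, slot, out.data(), out.size()) <= out.size();
}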
struct Platform;
struct IPlatform
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IPlatform", 0, 1);
const Platform& (CARB_ABI* get)(const PlatformId& platformId);
Platform& (CARB_ABI* getMutable)(const PlatformId& platformId);
void (CARB_ABI* reset)(const PlatformId& platformId);
void (CARB_ABI* resetAll)();
};
} // namespace flatcache
} // namespace carb
| 41,868 |
C
| 40.169125 | 164 | 0.630386 |
omniverse-code/kit/fabric/include/carb/flatcache/GetArrayGPU.h
|
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "PathToAttributesMap.h"
#include <carb/profiler/Profile.h>
namespace carb
{
namespace flatcache
{
const uint64_t kProfilerMask = 1;
// If this is an array-of-arrays:
// array.cpuData() - array of CPU pointers on CPU
// gpuPointerArray->cpuData() - array of GPU pointers on CPU
inline void PathToAttributesMap::enableGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* gpuPointerArray)
{
CARB_PROFILE_ZONE(kProfilerMask, "enableGpuRead");
using omni::gpucompute::MemcpyKind;
log("begin enableGpuRead\n");
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
uint8_t* cpuArray = array.cpuData();
uint8_t*& gpuArray = array.gpuArray;
if (gpuValid)
{
// Nothing to do
}
else if (cpuValid)
{
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
if (typeInfo.isArray)
{
const size_t elemCount = array.count;
uint8_t** cpuPointers = reinterpret_cast<uint8_t**>(cpuArray);
uint8_t** gpuPointers = reinterpret_cast<uint8_t**>(gpuPointerArray->cpuData());
for (size_t elem = 0; elem != elemCount; elem++)
{
const size_t desiredCapacity = elemToArraySize[elem];
const size_t cpuCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
if(gpuCapacity != desiredCapacity)
{
destructiveResizeIfNecessaryGPU(*gpuPointerArray, elem, gpuCapacity, desiredCapacity, typeInfo.arrayElemSize, platform.gpuCuda, platform.gpuCudaCtx);
}
const size_t copyByteCount = std::min(desiredCapacity, cpuCapacity) * typeInfo.arrayElemSize;
if (copyByteCount > 0)
{
void* cpuPointer = cpuPointers[elem];
void* gpuPointer = gpuPointers[elem];
CARB_ASSERT(cpuPointer);
CARB_ASSERT(gpuPointer);
platform.gpuCuda->memcpyAsync( *platform.gpuCudaCtx, gpuPointer, cpuPointer, copyByteCount, MemcpyKind::hostToDevice);
}
}
gpuPointerArray->cpuValid = true;
}
else
{
// Copy the outer array from CPU to GPU
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuCuda, platform.gpuCudaCtx);
log("array values: to GPU\n");
uint8_t* cpuArray = array.cpuData();
carb::profiler::ZoneId zoneId = CARB_PROFILE_BEGIN(kProfilerMask, "outer array values");
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, gpuArray, cpuArray, byteCount, MemcpyKind::hostToDevice);
CARB_PROFILE_END(kProfilerMask, zoneId);
}
// New state
cpuValid = true;
gpuValid = true;
gpuAllocedWithCuda = true;
}
}
inline void PathToAttributesMap::enableGpuWrite(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* arrayGpuDataArray)
{
CARB_PROFILE_ZONE(kProfilerMask, "enableGpuWrite");
using omni::gpucompute::MemcpyKind;
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (!typeInfo.isArray && !gpuValid)
{
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuCuda, platform.gpuCudaCtx);
}
else if (typeInfo.isArray)
{
// Array-valued elements are lazily allocated, meaning they are only
// resized when write access is requested.
// Write access has been requested, so resize if necessary
size_t elemCount = array.count;
for (size_t elem = 0; elem != elemCount; elem++)
{
size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
resizeIfNecessaryGPU(
*arrayGpuDataArray, elem, gpuCapacity, desiredElemCount, typeInfo.arrayElemSize, platform.gpuCuda, platform.gpuCudaCtx);
}
// Upload of allocated pointers to GPU happens outside this function
}
// New state
usdValid = false;
cpuValid = false;
gpuValid = true;
gpuAllocedWithCuda = true;
if (elemToArrayCpuCapacity) elemToArrayCpuCapacity->usdValid = false;
if (elemToArrayGpuCapacity) elemToArrayGpuCapacity->usdValid = false;
if (arrayGpuDataArray) arrayGpuDataArray->usdValid = false;
}
inline ConstSpanC PathToAttributesMap::getArraySpanRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanRdGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
// We don't set dirty indices here because this method gives read-only access
return getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix).array;
}
inline const void* PathToAttributesMap::getArrayRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
// We don't set dirty indices here because this method gives read-only access
return getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix).array.ptr;
}
inline SpanC PathToAttributesMap::getArrayGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayGpuC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline ConstSpanC PathToAttributesMap::getArrayRdGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdGpuC", apiLogEnabled, attrName);
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getArrayWrGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getArraySpanWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanWrGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix);
setArrayDirty(array);
return array.array;
}
inline void* PathToAttributesMap::getArrayWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrGpuC", apiLogEnabled, attrName);
// Get write-only GPU access
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
inline SpanC PathToAttributesMap::getArraySpanGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array;
}
inline void* PathToAttributesMap::getArrayGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
inline SpanC PathToAttributesMap::getAttributeGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix)
{
APILOGGER("getAttributeGpuC", apiLogEnabled, path, attrName);
bool present; // Whether this path has a bucket
BucketId bucketId; // Pointer to the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig().withPtrToPtrKind(ptrToPtrKind), suffix);
setArrayElementDirty(array, element);
return getArrayElementPtr(array.array, element);
}
inline ConstSpanC PathToAttributesMap::getAttributeRdGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix)
{
APILOGGER("getAttributeRdGpuC", apiLogEnabled, path, attrName);
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ConstSpanC array = getArraySpanC(bucketId, attrName, CudaReadConfig().withPtrToPtrKind(ptrToPtrKind), suffix).array;
// We don't set dirty indices here because this method gives read-only access
return getArrayElementPtr(array, element);
}
inline SpanC PathToAttributesMap::getAttributeWrGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix)
{
APILOGGER("getAttributeWrGpuC", apiLogEnabled, path, attrName);
// Writing an element is a RMW on the whole array, so use getArrayGpu instead of getArrayGpuWr
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig().withPtrToPtrKind(ptrToPtrKind), suffix);
setArrayElementDirty(array, element);
return getArrayElementPtr(array.array, element);
}
// Typed accessors
template <typename T>
inline const T* PathToAttributesMap::getArrayRdGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayRdGpu", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getArrayRdGpuC(bucket, attrName));
}
template <typename T>
inline T* PathToAttributesMap::getArrayWrGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayWrGpu", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayWrGpuC(bucket, attrName));
}
template <typename T>
inline T* PathToAttributesMap::getArrayGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayGpu", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayGpuC(bucket, attrName));
}
template <typename T>
inline T* PathToAttributesMap::getAttributeGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind)
{
APILOGGER("getAttributeGpu", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeGpuC(path, attrName, ptrToPtrKind).ptr);
}
template <typename T>
inline const T* PathToAttributesMap::getAttributeRdGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind)
{
APILOGGER("getAttributeRdGpu", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getAttributeRdGpuC(path, attrName, ptrToPtrKind).ptr);
}
template <typename T>
inline T* PathToAttributesMap::getAttributeWrGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind)
{
APILOGGER("getAttributeWrGpu", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeWrGpuC(path, attrName, ptrToPtrKind).ptr);
}
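// Illustrative usage sketch (comment only, not part of the original header):
//
//   // Fetch a device pointer for a float attribute, then hand it to a
//   // hypothetical CUDA kernel. The trailing PtrToPtrKind/NameSuffix
//   // parameters are assumed to be defaulted in the class declaration.
//   float* dRadii = ptam.getAttributeGpu<float>(path, radiusToken);
//   scaleKernel<<<blocks, threads>>>(dRadii, count);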
}
}
| 14,162 |
C
| 37.909341 | 169 | 0.69856 |
omniverse-code/kit/fabric/include/carb/flatcache/AttrNameAndType.h
|
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/Type.h>
namespace carb
{
namespace flatcache
{
// Basic types
// Types and methods ending in C are intended to be used with C-ABI interfaces.
// PathToAttributesMap allows metadata to be attached to attributes.
// The metadata that exist currently are for (flatcache) internal use only.
// Abstractly it holds an array for each attribute, where element 0
// (NameSuffix::none) is the value itself, and other elements are the metadata.
// It is called NameSuffix because conceptually each metadatum adds a
// new attribute with a name suffix specifying the type of metadata.
// For example, suppose you have an attribute "points" that has an attached
// elemCount. Conceptually you have two attributes "points" and
// "points_elemCount".
enum class NameSuffix
{
none = 0, // Index NameSuffix::none is the index of the data itself
// The following metadata is present on USD attributes that connect to others
connection // The target(prim, attribute) of the connection
};
struct Connection
{
PathC path;
TokenC attrName;
};
// AttrNameAndType specifies the name and type of an attribute. When the user
// searches for buckets of prims they use this type to specify which attributes
// the prims must have. Also the user can query the name and type of an
// attribute at a given path, and the output has this type.
//
// This version of the struct contains the type in flatcache format only.
// The original version additionally contained the type in USD format, but
// that version has been deprecated (see the compatibility note below).
struct AttrNameAndType
{
Type type;
Token name;
NameSuffix suffix;
AttrNameAndType() = default;
AttrNameAndType(Type type, Token name, NameSuffix suffix = NameSuffix::none)
: type(type), name(name), suffix(suffix)
{
}
// Note that in the name comparisons below TokenC masks off USD's lifetime bit.
// For example, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
bool operator<(const AttrNameAndType& rhs) const
{
if (TypeC(type) < TypeC(rhs.type))
return true;
if (TypeC(rhs.type) < TypeC(type))
return false;
if (TokenC(name) < TokenC(rhs.name))
return true;
if (TokenC(rhs.name) < TokenC(name))
return false;
return suffix < rhs.suffix;
}
bool operator==(const AttrNameAndType& other) const
{
return type == other.type && name == other.name && suffix == other.suffix;
}
};
static_assert(std::is_standard_layout<AttrNameAndType>::value,
"AttrNameAndType must be standard layout as it is used in C-ABI interfaces");
// NOTE: This type alias provides source level compatibility. Usage of the original AttrNameAndType structure has
// been replaced with what was previously called AttrNameAndType_v2 and the _v2 suffix dropped. This alias allows code
// which still refers to AttrNameAndType_v2 to compile.
using AttrNameAndType_v2 = AttrNameAndType;
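// Illustrative sketch (not part of the original header): describing a float3
// position attribute for bucket queries. How the Token is obtained (IToken,
// Token helpers) is outside the scope of this sketch.
inline AttrNameAndType examplePositionAttr(const Token& name)
{
    const Type float3Position(BaseDataType::eFloat, 3, 0, AttributeRole::ePosition);
    return AttrNameAndType(float3Position, name);
}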
}
}
namespace std
{
template <>
struct hash<carb::flatcache::AttrNameAndType>
{
// Use the same hash_combine as boost
template <class T>
static inline void hash_combine(std::size_t& seed, const T& v)
{
std::hash<T> hasher;
seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
std::size_t operator()(const carb::flatcache::AttrNameAndType& key) const
{
size_t hash = std::hash<carb::flatcache::Type>{}(key.type);
hash_combine(hash, std::hash<carb::flatcache::Token>{}(key.name));
hash_combine(hash, uint32_t(key.suffix));
return hash;
}
};
}
| 4,247 |
C
| 32.984 | 118 | 0.705204 |
omniverse-code/kit/fabric/include/carb/flatcache/Platform.h
|
#pragma once
#include <carb/flatcache/Allocator.h>
namespace omni
{
namespace gpucompute
{
struct GpuCompute;
struct Context;
} // namespace gpucompute
} // namespace omni
namespace carb
{
namespace flatcache
{
struct Platform
{
Allocator allocator;
omni::gpucompute::GpuCompute* gpuCuda = nullptr;
omni::gpucompute::Context* gpuCudaCtx = nullptr;
// The gpuD3dVk interface is used only if you access GPU arrays using D3D or Vulkan.
// If you're only using CPU or CUDA GPU arrays then you don't set it.
omni::gpucompute::GpuCompute* gpuD3dVk = nullptr;
omni::gpucompute::Context* gpuD3dVkCtx = nullptr;
Platform() = default;
Platform(const Platform& other) = delete;
Platform& operator=(const Platform& other) = delete;
Platform(Platform&& other) = default;
Platform& operator=(Platform&& other) = default;
inline void reset()
{
gpuD3dVk = nullptr;
gpuD3dVkCtx = nullptr;
gpuCuda = nullptr;
gpuCudaCtx = nullptr;
allocator.~Allocator();
new (&allocator) Allocator();
}
// mirror of IPlatform functions
static void get(const PlatformId& id);
static void getMutable(const PlatformId& id);
static void reset(const PlatformId& id);
static void resetAll();
};
} // namespace flatcache
} // namespace carb
| 1,343 |
C
| 21.032787 | 88 | 0.676843 |
omniverse-code/kit/fabric/include/carb/flatcache/Type.h
|
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <sstream>
#include <string>
namespace carb
{
namespace flatcache
{
// TypeC values are integer keys that identify types, like float3, int[] etc.
// There isn't a USD type that can be cast directly to TypeC;
// use carb::flatcache::usdTypeToTypeC() instead.
struct TypeC
{
uint32_t type;
constexpr bool operator<(const TypeC& other) const
{
return type < other.type;
}
constexpr bool operator==(const TypeC& other) const
{
return type == other.type;
}
constexpr bool operator!=(const TypeC& other) const
{
return type != other.type;
}
};
static_assert(std::is_standard_layout<TypeC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");
static constexpr TypeC kUnknownType{ 0 };
enum class BaseDataType : uint8_t
{
eUnknown = 0,
eBool,
eUChar,
eInt,
eUInt,
eInt64,
eUInt64,
eHalf,
eFloat,
eDouble,
eToken,
// eRelationship is stored as a 64-bit integer internally, but shouldn't be
// treated as an integer type by nodes.
eRelationship,
// For internal use only
eAsset,
ePrim,
eConnection,
// eTags are attributes that have a name but no type or value
// They are used for named tags, including USD applied schemas
eTag
};
inline std::ostream& operator<<(std::ostream& s, const BaseDataType& type)
{
static const std::string names[] = { "unknown", "bool", "uchar", "int", "uint",
"int64", "uint64", "half", "float", "double",
"token", "rel", "asset", "prim", "connection",
"tag" };
if (type <= BaseDataType::eTag)
{
return s << names[uint8_t(type)];
}
return s;
}
// These correspond to USD attribute "roles", with some exceptions noted below.
// For example, a vector3f or vector3d (eVector) is transformed
// differently from a point3f or point3d (ePosition).
enum class AttributeRole : uint8_t
{
eNone = 0,
eVector,
eNormal,
ePosition,
eColor,
eTexCoord,
eQuaternion,
eTransform,
eFrame,
eTimeCode,
// eText is not a USD role. If a uchar[] attribute has role eText then
// the corresponding USD attribute will have type "string", and be human
// readable in USDA. If it doesn't, then it will have type "uchar[]" in USD
// and appear as an array of numbers in USDA.
eText,
// eAppliedSchema is not a USD role, eTags with this role are USD applied schema.
eAppliedSchema,
// ePrimTypeName is not a USD role, eTags with this role are USD prim types.
ePrimTypeName,
// eExecution is not a USD role, uint attributes with this role are used for control flow in Action Graphs.
eExecution,
eMatrix,
// eObjectId is not a USD role, uint64 attributes with this role are used for Python object identification.
eObjectId,
// eBundle is not a USD role, ePrim and eRelationship attributes with this role identify OmniGraph bundles
eBundle,
// ePath is not a USD role, it refers to strings that are reinterpreted as SdfPaths. The attribute type must
// be a uchar[] with a USD type "string".
ePath,
// eInstancedAttribute is used as a role on tag types in place of attribute types on instanced prims.
eInstancedAttribute,
// eAncestorPrimTypeName is not a USD role, eTags with this role are ancestor types of a USD prim type.
eAncestorPrimTypeName,
// Special marker for roles that are not yet determined
eUnknown,
};
inline std::ostream& operator<<(std::ostream& s, const AttributeRole& type)
{
static const std::string ognRoleNames[] = { "none", "vector", "normal", "point", "color",
"texcoord", "quat", "transform", "frame", "timecode",
"text", "appliedSchema", "primTypeName", "execution", "matrix",
"objectId", "bundle", "path", "instancedAttribute", "ancestorPrimTypeName",
"unknown" };
if (type <= AttributeRole::eUnknown)
{
return s << ognRoleNames[uint8_t(type)];
}
return s;
}
// Role names as used by USD, which are slightly different from the internal names used
inline std::string usdRoleName(const AttributeRole& type)
{
static const std::string usdRoleNames[] = { "none", "vector", "normal", "position", "color",
"texCoord", "quaternion", "transform", "frame", "timecode",
"text", "appliedSchema", "primTypeName", "execution", "matrix",
"objectId", "bundle", "path", "instancedAttribute", "ancestorPrimTypeName",
"unknown" };
if (type <= AttributeRole::eUnknown)
{
return usdRoleNames[uint8_t(type)];
}
return usdRoleNames[uint8_t(AttributeRole::eUnknown)];
}
struct Type
{
BaseDataType baseType; // 1 byte
// 1 for raw base types; 2 for vector2f, int2, etc; 3 for point3d, normal3f, etc;
// 4 for quatf, color4d, float4, matrix2f etc; 9 for matrix3f, etc; 16 for matrix4d, etc.
uint8_t componentCount; // 1 byte
// 0 for a single value
// 1 for an array
// 2 for an array of arrays (not yet supported)
uint8_t arrayDepth; // 1 byte
AttributeRole role; // 1 byte
constexpr Type(BaseDataType baseType, uint8_t componentCount = 1, uint8_t arrayDepth = 0, AttributeRole role = AttributeRole::eNone)
: baseType(baseType), componentCount(componentCount), arrayDepth(arrayDepth), role(role)
{
}
constexpr Type() : Type(BaseDataType::eUnknown)
{
}
// Matches little endian interpretation of the four bytes
constexpr explicit Type(const TypeC& t)
: baseType(BaseDataType(t.type & 0xff)),
componentCount((t.type >> 8) & 0xff),
arrayDepth((t.type >> 16) & 0xff),
role(AttributeRole((t.type >> 24) & 0xff))
{
}
constexpr explicit operator TypeC() const
{
uint32_t type = uint8_t(role) << 24 | arrayDepth << 16 | componentCount << 8 | uint8_t(baseType);
return TypeC{ type };
}
constexpr bool operator==(const Type& rhs) const
{
return compatibleRawData(rhs) && role == rhs.role;
}
constexpr bool operator!=(const Type& rhs) const
{
return !((*this) == rhs);
}
constexpr bool operator<(const Type& rhs) const
{
return TypeC(*this) < TypeC(rhs);
}
/**
* Role-insensitive equality check
*/
constexpr bool compatibleRawData(const Type& otherType) const
{
return baseType == otherType.baseType && componentCount == otherType.componentCount &&
arrayDepth == otherType.arrayDepth;
}
/**
* Check to see if this is one of the matrix types
*/
constexpr bool isMatrixType() const
{
return (role == AttributeRole::eMatrix) || (role == AttributeRole::eFrame) || (role == AttributeRole::eTransform);
}
/**
* Returns the dimensions of the type, componentCount for most types and square root of that for matrix types
*/
constexpr uint8_t dimension() const
{
if (isMatrixType())
{
return componentCount == 4 ? 2 : (componentCount == 9 ? 3 : (componentCount == 16 ? 4 : componentCount));
}
return componentCount;
}
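// For example (illustrative): a matrix4d has componentCount 16 and
// dimension() 4, while a float3 has componentCount 3 and dimension() 3.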
std::string getTypeName() const
{
std::ostringstream typeName;
typeName << baseType;
if (componentCount > 1)
typeName << uint32_t(componentCount);
if (arrayDepth == 1)
typeName << "[]";
else if (arrayDepth == 2)
typeName << "[][]";
// Some roles are hidden from USD
if ((role != AttributeRole::eNone)
&& (role != AttributeRole::eObjectId)
&& (role != AttributeRole::eBundle)
&& (role != AttributeRole::ePath)
)
{
typeName << " (" << usdRoleName(role) << ")";
}
return typeName.str();
}
// ======================================================================
/**
* OGN formats the type names slightly differently:
* - tuples use bracket notation, e.g. "float[3]" instead of "float3"
* - roles replace the base type name, e.g. "colord[3]" instead of "double3 (color)"
*/
std::string getOgnTypeName() const
{
std::ostringstream typeName;
if (role == AttributeRole::eText)
{
typeName << "string";
return typeName.str();
}
if (role == AttributeRole::ePath)
{
typeName << "path";
return typeName.str();
}
if (role != AttributeRole::eNone)
{
typeName << role;
// For roles with explicit types, add that to the role name
if ((role != AttributeRole::eTimeCode)
&& (role != AttributeRole::eTransform)
&& (role != AttributeRole::eFrame)
&& (role != AttributeRole::eObjectId)
&& (role != AttributeRole::eBundle)
&& (role != AttributeRole::eExecution))
{
switch (baseType)
{
case BaseDataType::eHalf:
typeName << "h";
break;
case BaseDataType::eFloat:
typeName << "f";
break;
case BaseDataType::eDouble:
typeName << "d";
break;
default:
typeName << baseType;
break;
}
}
}
else
{
typeName << baseType;
}
if (componentCount > 1)
{
typeName << "[" << uint32_t(dimension()) << "]";
}
if (arrayDepth == 1)
typeName << "[]";
else if (arrayDepth == 2)
typeName << "[][]";
return typeName.str();
}
};
inline std::ostream& operator<<(std::ostream& s, const Type& type)
{
s << type.getTypeName();
return s;
}
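// Illustrative sketch (not part of the original header): Type round-trips
// losslessly through its packed TypeC encoding, whose little-endian bytes are
// { baseType, componentCount, arrayDepth, role }. A point3f is base eFloat,
// 3 components, not an array, with the ePosition role.
static_assert(Type(TypeC(Type(BaseDataType::eFloat, 3, 0, AttributeRole::ePosition))) ==
Type(BaseDataType::eFloat, 3, 0, AttributeRole::ePosition),
"Type <-> TypeC round-trip sketch");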
}
}
namespace std
{
template <>
struct hash<carb::flatcache::Type>
{
std::size_t operator()(const carb::flatcache::Type& key) const
{
return carb::flatcache::TypeC(key).type;
}
};
template <>
struct hash<carb::flatcache::TypeC>
{
std::size_t operator()(const carb::flatcache::TypeC& key) const
{
return key.type;
}
};
}
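// Usage sketch (illustrative; not in the original header): with the std::hash
// specializations above, Type and TypeC can directly key standard unordered
// containers, since the hash is just the packed 32-bit TypeC word, e.g.
// std::unordered_map<carb::flatcache::Type, size_t> typeCounts;
// typeCounts[carb::flatcache::Type(carb::flatcache::BaseDataType::eFloat, 3)]++;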
| 11,365 |
C
| 30.484764 | 139 | 0.560493 |
omniverse-code/kit/fabric/include/carb/flatcache/GetArrayD3dGpu.h
|
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Defines.h>
#include <carb/graphics/Graphics.h>
#include <omni/gpucompute/GpuCompute.h>
using namespace carb::graphics;
using std::unique_ptr;
namespace carb
{
namespace flatcache
{
// If this is an array-of-arrays:
// array.cpuData() - array of CPU pointers, resident on the CPU
// gpuPointerArray->cpuData() - array of GPU pointers, resident on the CPU
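// Layout sketch (illustrative): for a float[] attribute on a bucket with two
// prims,
// array.cpuData() -> { float* a0, float* a1 } // inner CPU arrays
// gpuPointerArray->cpuData() -> { void* g0, void* g1 } // inner GPU buffers
// elemToArraySize -> { count(a0), count(a1) } // per-prim element counts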
inline void PathToAttributesMap::enableD3dGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* gpuPointerArray)
{
using omni::gpucompute::MemcpyKind;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
uint8_t* cpuArray = array.cpuData();
uint8_t*& gpuArray = array.gpuArray;
if (gpuValid)
{
// Nothing to do
}
else if (!gpuValid && cpuValid)
{
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
// If each element is an array, then they could be of different sizes
// So alloc and memcpy each one
if (typeInfo.isArray)
{
size_t elemCount = array.count;
uint8_t** elemToArrayCpuData = (uint8_t**)cpuArray;
uint8_t** elemToArrayGpuData = (uint8_t**)gpuPointerArray->cpuData();
for (size_t elem = 0; elem != elemCount; elem++)
{
// Make sure that the dest (GPU) buffer is large enough
const uint8_t* const& cpuData = elemToArrayCpuData[elem]; // src
size_t& destCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
destructiveResizeIfNecessaryGPU(
*gpuPointerArray, elem, destCapacity, desiredElemCount, typeInfo.arrayElemSize, platform.gpuD3dVk, platform.gpuD3dVkCtx);
// Copy from CPU to GPU
if (desiredElemCount != 0 && cpuData)
{
uint8_t*& gpuData = elemToArrayGpuData[elem]; // dest
size_t copyByteCount = desiredElemCount * typeInfo.arrayElemSize;
platform.gpuD3dVk->memcpy(*platform.gpuD3dVkCtx, gpuData, cpuData, copyByteCount, MemcpyKind::hostToDevice);
}
else if (desiredElemCount != 0 && !cpuData)
{
printf("Warning: GPU read access requested, CPU is valid but not allocated\n");
}
}
// We don't need to copy the outer array to GPU here.
// In D3dVk, the outer array is currently a CPU array of descriptors that we copy to
// a kernel call's descriptor set at dispatch time
}
else
{
// Copy the outer array from CPU to GPU
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuD3dVk, platform.gpuD3dVkCtx);
uint8_t* cpuArray = array.cpuData();
platform.gpuD3dVk->memcpy(*platform.gpuD3dVkCtx, gpuArray, cpuArray, byteCount, MemcpyKind::hostToDevice);
}
// New state
cpuValid = true;
gpuValid = true;
gpuAllocedWithCuda = false;
}
}
inline void PathToAttributesMap::enableD3dGpuWrite(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* arrayGpuDataArray)
{
log("begin enableGpuWrite\n");
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (!gpuValid)
{
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuD3dVk, platform.gpuD3dVkCtx);
// Array-valued elements are lazily allocated, meaning they are only
// resized when write access is requested.
// Write access has been requested, so resize if necessary
if (typeInfo.isArray)
{
size_t elemCount = array.count;
for (size_t elem = 0; elem != elemCount; elem++)
{
size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
resizeIfNecessaryGPU(
*arrayGpuDataArray, elem, gpuCapacity, desiredElemCount, typeInfo.arrayElemSize, platform.gpuD3dVk, platform.gpuD3dVkCtx);
}
// Upload of allocated pointers to GPU happens outside this function
}
}
// New state
usdValid = false;
cpuValid = false;
gpuValid = true;
gpuAllocedWithCuda = false;
log("end enableGpuWrite\n\n");
}
inline const void* PathToAttributesMap::getArrayRdD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
// We don't set dirty indices here because this method gives read-only access
return getArraySpanC(bucketId, attrName, D3dVkReadConfig(), suffix).array.ptr;
}
inline void* PathToAttributesMap::getArrayWrD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
inline void* PathToAttributesMap::getArrayD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
#if 0
// If array of values, return the Buffer* that was returned by malloc
// If array of arrays, return array of Buffer* for each element array
inline std::pair<void**, size_t> PathToAttributesMap::getArrayD3d(const Bucket& bucket, const TokenC& attrName)
{
std::pair<void**, size_t> retval = { nullptr, 0 };
BucketImpl& bucketImpl = buckets[bucket];
auto iter = bucketImpl.arrays.find({ pxr::TfType(), TypeC(), attrName });
bool found = (iter != bucketImpl.arrays.end());
if (found)
{
bool isTag = (typeToInfo[iter->first.type].size == 0);
if (!isTag)
{
pxr::TfType type = iter->first.type;
size_t elemSize = typeToInfo[type].size;
size_t arrayElemSize = typeToInfo[type].arrayElemSize;
// Read enable must come before write enable
enableD3dGpuRead(iter->second, elemSize, arrayElemSize);
enableD3dGpuWrite(iter->second, elemSize, arrayElemSize);
retval.first = iter->second.d3dArrays.data();
retval.second = iter->second.d3dArrays.size();
}
else if (isTag)
{
// If is a tag, then array.data() will be zero, so set special value
// to distinguish from tag absent case
retval.first = (void**)-1;
}
}
return retval;
}
#endif
inline omni::gpucompute::GpuPointer PathToAttributesMap::getAttributeD3d(const PathC& path, const TokenC& attrName)
{
bool present; // Whether this path has a bucket
BucketId bucketId; // Pointer to the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
// TODO: Get rid of double hash lookup below (getArraySpanC + explicit call)
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig());
setArrayElementDirty(arrayAndDirtyIndices, element);
void* array = arrayAndDirtyIndices.array.ptr;
if (array != nullptr)
{
// Get elemSize
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return { nullptr, 0, 0 };
const AttrName name{ attrName, NameSuffix::none };
const MirroredArray *valuesArray;
if (!bucketImplPtr->scalarAttributeArrays.find(name, &valuesArray))
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
valuesArray = &arrayAttributeArray->values;
}
else
{
return { nullptr, 0, 0 };
}
}
assert(valuesArray);
const Typeinfo &typeinfo = getTypeInfo(valuesArray->type);
const bool isArrayOfArray = typeinfo.isArray;
const size_t elemSize = typeinfo.size;
if (!isArrayOfArray)
{
return { array, element * elemSize, elemSize };
}
else if (isArrayOfArray)
{
// For arrays of arrays we return the Buffer* of the inner array
uint8_t* const* elemToArrayData = (uint8_t* const*)array;
return { elemToArrayData[element], 0, 0 };
}
}
return { nullptr, 0, 0 };
}
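// Usage sketch (illustrative; variable names and GpuPointer field names are
// assumptions): for a scalar attribute the returned GpuPointer addresses one
// element of the bucket's SOA array as { array, element * elemSize, elemSize };
// for an array-of-arrays attribute it wraps the inner array's buffer instead.
// omni::gpucompute::GpuPointer p = pathToAttributesMap.getAttributeD3d(path, attrName);
// // bind p in a compute dispatch if it is non-null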
}
}
| 10,667 |
C
| 37.652174 | 142 | 0.626043 |
omniverse-code/kit/fabric/include/carb/flatcache/PathToAttributesMap.h
|
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Defines.h>
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IdTypes.h>
#include <carb/flatcache/ApiLogger.h>
#include <carb/flatcache/HashMap.h>
#include <carb/flatcache/IFlatcache.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/Ordered_Set.h>
#include <carb/flatcache/Platform.h>
#include <carb/flatcache/PrimChanges.h>
#include <carb/flatcache/Type.h>
#include <carb/logging/Log.h>
#include <carb/profiler/Profile.h>
#include <carb/thread/Mutex.h>
#include <omni/gpucompute/GpuCompute.h>
#include <fstream>
// The following is needed to include USD headers
#if defined(__GNUC__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# pragma GCC diagnostic ignored "-Wunused-local-typedefs"
# pragma GCC diagnostic ignored "-Wunused-function"
// This suppresses deprecated-header warnings, which cannot be suppressed with pragmas.
// An alternative is to specify the -Wno-deprecated build option, but that disables other useful warnings too.
# ifdef __DEPRECATED
# define OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
# undef __DEPRECATED
# endif
#endif
#include <pxr/base/tf/type.h>
#include <pxr/usd/usd/timeCode.h>
// PathToAttributesMap doesn't depend on USD for tokens or paths
// However, it's useful to be able to see the USD text representation of tokens and
// paths when debugging. Set ENABLE_USD_DEBUGGING to 1 to enable that.
#if defined(_DEBUG)
# define ENABLE_USD_DEBUGGING 1
#else
# define ENABLE_USD_DEBUGGING 0
#endif
// TODO: Move this to some shared macro header if needed elsewhere
#if defined(_DEBUG)
#define VALIDATE_TRUE(X) CARB_ASSERT(X)
#else
#define VALIDATE_TRUE(X) X
#endif
#define PTAM_SIZE_TYPE size_t
#define PTAM_SIZE_TYPEC (flatcache::TypeC(carb::flatcache::Type(carb::flatcache::BaseDataType::eUInt64)))
static_assert(sizeof(PTAM_SIZE_TYPE) == sizeof(uint64_t), "Unexpected sizeof size_t");
#define PTAM_POINTER_TYPE void*
#define PTAM_POINTER_TYPEC (flatcache::TypeC(carb::flatcache::Type(carb::flatcache::BaseDataType::eUInt64)))
static_assert(sizeof(PTAM_POINTER_TYPE) == sizeof(uint64_t), "Unexpected sizeof void*");
// When we switch to CUDA async CPU<->GPU copies, we'll need to use pinned CPU
// memory for performance. However, the allocations themselves will be much
// slower. If you want to see how much slower, set USE_PINNED_MEMORY to 1.
// When we do switch, we should probably do a single allocation and sub
// allocate it ourselves. That way we'd only call cudaHostAlloc once.
#define USE_PINNED_MEMORY 0
// Set this to one to enable CARB profile zones for large bucket copies
#define PROFILE_LARGE_BUCKET_COPIES 0
// We plan to move TfToken and AssetPath construction to IToken.
// Until we do we have to depend on token.h, a USD header
#include "FlatCacheUSD.h"
#include <pxr/usd/sdf/pathTable.h> // 104 only - do not port this to 105+
// Enums are in their own file since they have no external dependencies
#include "Enums.h"
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/path.h>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <map>
#include <queue>
#include <set>
#include <unordered_map>
#include <utility>
using pxr::UsdTimeCode;
using Hierarchy = pxr::SdfPathTable<int>; // 104 only - do not port this to 105+
namespace carb
{
namespace flatcache
{
struct AttrName
{
TokenC name;
NameSuffix suffix;
bool operator<(const AttrName& other) const = delete;
bool operator==(const AttrName& other) const = delete;
};
// Use the same hash_combine as boost
template <class T>
static inline size_t hash_combine(std::size_t seed, const T& v)
{
std::hash<T> hasher;
seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
return seed;
}
}
}
namespace carb
{
namespace flatcache
{
inline std::string toString(NameSuffix suffix)
{
if (suffix == NameSuffix::connection)
return "_connection";
else if (suffix == NameSuffix::none)
return "";
return "";
}
inline std::ostream& operator<<(std::ostream& s, const NameSuffix& nameSuffix)
{
s << toString(nameSuffix);
return s;
}
// FlatCache buckets UsdPrims according to their type and the UsdAttributes
// they have. For example, all the UsdGeomMeshes can go in one bucket, all the
// UsdSkelAnimations in another. The user can then quickly get a contiguous
// array of all the meshes, without having to traverse the whole stage.
//
// The name of a bucket is its set of attribute names (Tokens).
// As in the USD API, these tokens are the names used in USDA files, not C++
// type names like UsdGeomMesh.
//
// Example (using C++11 initializer lists to create the sets):
// Bucket meshes = { Token("Mesh") };
// Bucket skelAnimations = { Token("SkelAnimation") };
//
// For efficiency, set is flatcache::set, a sorted std::vector, not std::set
using Bucket = set<AttrNameAndType>;
struct BucketAndId
{
const Bucket bucket;
const BucketId bucketId;
};
// Buckets store attribute values in contiguous arrays, and in C++ array
// indices are size_t
using ArrayIndex = size_t;
const ArrayIndex kInvalidArrayIndex = 0xffff'ffff'ffff'ffff;
// Invariants:
// I0: setOfChangedIndices = [0..N), if allIndicesChanged
// = changedIndices, otherwise
// I1: If setOfChangedIndices==[0..N) then allIndicesChanged=true and changedIndices = {}
// where N is defined by the caller
//
// In particular this means that changedIndices can't have size N
// because if all indices were changed, then changedIndices = {}, by I1
struct ChangedIndicesImpl
{
bool allIndicesChanged = true;
flatcache::set<ArrayIndex> changedIndices;
ChangedIndicesImpl(size_t N)
{
if (N == 0)
allIndicesChanged = true;
else
allIndicesChanged = false;
}
// Create the singleton set {index}
ChangedIndicesImpl(ArrayIndex index, size_t N)
{
if (index == 0 && N == 1)
{
// Maintain invariant I0
allIndicesChanged = true;
}
else
{
// Maintain invariant I0
changedIndices = { index };
allIndicesChanged = false;
}
}
void dirtyAll()
{
// Maintain invariant I1
allIndicesChanged = true;
changedIndices.clear();
}
void insert(size_t index, size_t N)
{
CARB_ASSERT(index < N);
// If all indices already changed, then inserting an index has no
// effect
if (allIndicesChanged)
return;
changedIndices.insert(index);
// Maintain invariant I1
if (changedIndices.size() == N)
{
dirtyAll();
}
}
void decrementN(size_t newN)
{
if (allIndicesChanged)
return;
changedIndices.erase(newN);
// Maintain invariant I1: if all remaining indices are now changed,
// collapse to the allIndicesChanged representation
if (changedIndices.size() == newN)
dirtyAll();
}
void erase(size_t index, size_t N)
{
CARB_ASSERT(index < N);
if (allIndicesChanged)
{
allIndicesChanged = false;
// Make a sorted list of integers [0..N) \ index
changedIndices.v.resize(N - 1);
size_t dest = 0;
for (size_t i = 0; i != index; i++)
{
changedIndices.v[dest++] = i;
}
for (size_t i = index + 1; i != N; i++)
{
changedIndices.v[dest++] = i;
}
return;
}
changedIndices.erase(index);
}
bool contains(size_t index)
{
if (allIndicesChanged)
return true;
return changedIndices.contains(index);
}
};
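// Behavior sketch (illustrative): with N == 3,
// ChangedIndicesImpl c(3); // nothing marked changed yet
// c.insert(0, 3); c.insert(1, 3); // changedIndices == { 0, 1 }
// c.insert(2, 3); // invariant I1 collapses the set:
// // allIndicesChanged == true, changedIndices == {}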
struct ArrayAndDirtyIndices
{
SpanC array; // We use SpanC instead of gsl::span<const uint8_t> to allow casting to array of correct type
std::vector<ChangedIndicesImpl*> changedIndicesForEachListener; // This is empty if change tracking is not enabled for this attribute
};
// Bucket vectors and their attribute arrays are public, so users can iterate
// over them directly using for loops.
// For users that prefer opaque iterators, we provide View.
struct View;
// FlatCache doesn't need all metadata in UsdAttribute, just the attribute's
// type, size in bytes, whether it is an array, and if it is an array, the
// size of each element in bytes
struct Typeinfo
{
size_t size;
bool isArray;
size_t arrayElemSize;
};
// FlatCache stores a map from attribute names (Tokens) to their type and
// size.
using TypeToInfo = HashMap<TypeC, Typeinfo, std::hash<TypeC>, std::equal_to<TypeC>, AllocFunctor, FreeFunctor>;
// By default, an attribute's value is not in the cache, and flags == eNone
// Once the user reads a value, ePresent is true
// Once the user writes a value, eDirty is true
enum class Flags
{
eNone = 0,
ePresent = 1,
eDirty = 2
};
// Operators for combining Flags
constexpr enum Flags operator|(const enum Flags a, const enum Flags b)
{
return (enum Flags)(uint32_t(a) | uint32_t(b));
}
constexpr enum Flags operator&(const enum Flags a, const enum Flags b)
{
return (enum Flags)(uint32_t(a) & uint32_t(b));
}
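// Usage sketch (illustrative): the operators above combine flag bits, so an
// attribute that has been read (ePresent) and then written (eDirty) carries both.
static_assert((Flags::ePresent | Flags::eDirty) != Flags::eNone, "combined flags are non-empty");
static_assert(((Flags::ePresent | Flags::eDirty) & Flags::eDirty) == Flags::eDirty, "dirty bit is set");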
struct BucketChangesImpl
{
// Which attributes changed
gsl::span<const AttrNameAndType> changedAttributes;
// For each attribute, which prims changed?
std::vector<ConstChangedIndicesC> changedIndices;
gsl::span<const Path> pathArray;
// Which indices contain newly added prims?
gsl::span<const size_t> addedIndices;
};
struct PrimBucketListImpl
{
flatcache::set<BucketId> buckets;
std::vector<BucketChangesImpl> changes;
void clear()
{
buckets.clear();
changes.clear();
}
};
using SerializationCache = HashMap<uint64_t, std::string>;
using DeserializationCache = HashMap<std::string, pxr::SdfPath>;
// Now we've defined the basic types, we can define the type of FlatCache.
//
// Abstractly, FlatCache maps each Path to the UsdAttributes of the UsdPrim
// at that path. So the type of FlatCache is "PathToAttributesMap".
struct PathToAttributesMap
{
struct MirroredArray
{
private:
std::vector<uint8_t> cpuArray;
public:
Platform& platform;
TypeC type;
Typeinfo typeinfo;
uint8_t* gpuArray;
size_t gpuCapacity; // Amount of memory allocated at gpuArray in bytes
std::vector<void*> d3dArrays; // Actually vector of Buffer*
size_t count;
bool usdValid;
bool cpuValid;
bool gpuValid;
bool gpuAllocedWithCuda;
using AttributeMutex = carb::thread::mutex;
AttributeMutex attributeMutex;
MirroredArray(Platform& platform_, const TypeC &type, const Typeinfo& typeinfo) noexcept;
~MirroredArray();
MirroredArray(const MirroredArray& other) = delete;
MirroredArray& operator=(const MirroredArray& other) noexcept;
MirroredArray(MirroredArray&& other) noexcept;
MirroredArray& operator=(MirroredArray&& other) noexcept;
friend void swap(MirroredArray& a, MirroredArray& b) noexcept;
inline bool isArrayOfArray() const
{
CARB_ASSERT((typeinfo.arrayElemSize != 0) == typeinfo.isArray);
return typeinfo.isArray;
}
inline MirroredArray* getValuesArray()
{
return this;
}
void resize(size_t byteCount)
{
// CPU
// At the moment, CPU always resizes, but eventually it will only
// resize if it is allocated and valid
// This will ensure that GPU temp data is never allocated on CPU
cpuArray.resize(byteCount);
// Don't need to resize GPU here, because it is deferred until next
// copy to/from GPU mem
}
// GPU resize that preserves existing contents
// This is used by addPath and addAttributes
void resizeGpu(omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx,
size_t byteCount,
size_t elemSize)
{
if (!computeAPI || !computeCtx)
return;
bool capacitySufficient = (byteCount <= gpuCapacity);
if (!capacitySufficient)
{
void* oldGpuArray = gpuArray;
size_t oldByteCount = gpuCapacity;
gpuArray = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemSize));
gpuCapacity = byteCount;
if (oldGpuArray)
{
using omni::gpucompute::MemcpyKind;
computeAPI->memcpyAsync(*computeCtx, gpuArray, oldGpuArray, oldByteCount, MemcpyKind::deviceToDevice);
computeAPI->freeAsync(*computeCtx, oldGpuArray);
}
}
}
size_t size() const
{
return cpuArray.size();
}
uint8_t* cpuData()
{
return cpuArray.data();
}
const uint8_t* cpuData() const
{
return cpuArray.data();
}
void clear()
{
cpuArray.clear();
}
};
using ScalarAttributeArray = MirroredArray;
struct ArrayAttributeArray
{
enum class MirroredArrays : uint8_t
{
Values,
ElemCounts,
CpuElemCounts,
GpuElemCounts,
GpuPtrs,
Count
};
ArrayAttributeArray(Platform& platform_, const TypeC& type, const Typeinfo &typeinfo) noexcept;
~ArrayAttributeArray();
ArrayAttributeArray(const ArrayAttributeArray& other) = delete;
ArrayAttributeArray& operator=(const ArrayAttributeArray& other) noexcept;
ArrayAttributeArray(ArrayAttributeArray&& other) noexcept;
ArrayAttributeArray& operator=(ArrayAttributeArray&& other) noexcept;
friend void swap(ArrayAttributeArray& a, ArrayAttributeArray& b) noexcept;
inline MirroredArray* getValuesArray()
{
return &values;
}
MirroredArray values;
MirroredArray elemCounts;
MirroredArray cpuElemCounts;
MirroredArray gpuElemCounts;
MirroredArray gpuPtrs;
};
// DO NOT generalize this static_assert using globally named defines for magic numbers.
// We intentionally sprinkle static_assert on hardcoded sizes around this file to increase friction when changing
// the struct definition. Any change to ArrayAttributeArray requires evaluating multiple locations that rely on
// keeping in sync with the struct. Having each of these be hardcoded comparions forces future authors to
// individually evaluate each dependent site for correctness. If the comparison is generalized, future authors could
// simply adjust the global definition without examining every dependent routine, which might lead to errors.
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
struct Changes
{
// changedAttributes and changesIndices together implement an ordered
// map, from attribute to changed indices.
// changedAttributes is a flatcache::set, which is a sorted std::vector.
//
// To lookup an element of the map, find the index, i, of the key in
// changedAttributes, then read the value from changedIndices[i]
//
// changedAttributes and changedIndices must have the same size.
//
// TODO: make a general ordered_map class based on flatcache::set
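// Lookup sketch (illustrative; mirrors the pattern in setDirty below):
// auto it = lower_bound(changedAttributes.v.begin(), changedAttributes.v.end(), key);
// if (it != changedAttributes.v.end() && !(key < *it))
// value is changedIndices[it - changedAttributes.v.begin()];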
flatcache::set<AttrNameAndType> changedAttributes;
std::vector<ChangedIndicesImpl> changedIndices;
//
// New elements are stored in a set
//
flatcache::set<ArrayIndex> addedIndices;
void setDirty(const AttrNameAndType& nameAndType, size_t index, size_t maxIndex)
{
auto& keys = changedAttributes.v;
auto& values = changedIndices;
auto insertIter = lower_bound(keys.begin(), keys.end(), nameAndType);
ptrdiff_t insertIndex = insertIter - keys.begin();
bool found = (insertIter != keys.end() && !(nameAndType < *insertIter));
if (found)
{
values[insertIndex].insert(index, maxIndex);
}
else
{
keys.insert(insertIter, nameAndType);
values.insert(values.begin() + insertIndex, ChangedIndicesImpl(index, maxIndex));
}
}
void dirtyAll(const AttrNameAndType& nameAndType, size_t maxIndex)
{
auto& keys = changedAttributes.v;
auto& values = changedIndices;
auto insertIter = lower_bound(keys.begin(), keys.end(), nameAndType);
ptrdiff_t insertIndex = insertIter - keys.begin();
bool found = (insertIter != keys.end() && !(nameAndType < *insertIter));
if (found)
{
values[insertIndex].dirtyAll();
}
else
{
keys.insert(insertIter, nameAndType);
ChangedIndicesImpl changedIndices(maxIndex);
changedIndices.dirtyAll();
values.insert(values.begin() + insertIndex, changedIndices);
}
}
void addNewPrim(size_t index)
{
addedIndices.insert(index);
}
void removePrim(size_t index)
{
//
// we just clean the index from the set
//
addedIndices.erase(index);
}
size_t getNewPrimCount()
{
return addedIndices.size();
}
};
// FlatCache buckets UsdPrims according to type and attributes, and
// BucketImpl stores the attribute values of a bucket's prims in
// structure-of-arrays (SOA) format.
// BucketImpl maps each attribute name (TokenC) to a MirroredArray, a
// contiguous byte array (vector<uint8_t>) and a bitfield encoding the
// validate/dirtiness of each mirror.
// Abstractly, flatcache data is addressed like a multidimensional array
// buckets[bucket][attributeName][path].
// FlatCache uses byte arrays instead of typed arrays, because USD files,
// scripts, and plugins can define custom types, so no dll or exe knows the
// complete set of types at the time of its compilation.
// BucketImpl also contains elemToPath to map each SOA element to the
// Path it came from.
struct BucketImpl
{
struct Hasher
{
size_t operator()(const AttrName& key) const
{
return hash_combine(hash(key.name), uint32_t(key.suffix));
}
};
struct KeyEqual
{
bool operator()(const AttrName& a, const AttrName& b) const
{
return (a.name == b.name) && (a.suffix == b.suffix);
}
};
using ScalarAttributeArrays = HashMap<AttrName, ScalarAttributeArray, Hasher, KeyEqual, AllocFunctor, FreeFunctor>;
using ArrayAttributeArrays = HashMap<AttrName, ArrayAttributeArray, Hasher, KeyEqual, AllocFunctor, FreeFunctor>;
Platform& platform;
ScalarAttributeArrays scalarAttributeArrays;
ArrayAttributeArrays arrayAttributeArrays;
std::vector<pxr::SdfPath> elemToPath;
// listenerIdToChanges entries are lazily created when the user changes
// an attribute, or when an attribute moves between buckets
HashMap<ListenerId, Changes, ListenerIdHasher, std::equal_to<ListenerId>, AllocFunctor, FreeFunctor> listenerIdToChanges;
template<typename CallbackT>
void forEachValueArray(CallbackT callback);
BucketImpl(Platform &platform_)
: platform(platform_)
, scalarAttributeArrays(0, Hasher(), KeyEqual(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, arrayAttributeArrays(0, Hasher(), KeyEqual(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, elemToPath()
, listenerIdToChanges(0, ListenerIdHasher(), std::equal_to<ListenerId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
{
}
~BucketImpl()
{
#if PROFILE_LARGE_BUCKET_COPIES
size_t count = elemToPath.size();
carb::profiler::ZoneId zoneId = carb::profiler::kUnknownZoneId;
if (1000 <= count)
zoneId = CARB_PROFILE_BEGIN(1, "Destroy Bucket %zu", count);
scalarAttributeArrays.clear();
arrayAttributeArrays.clear();
elemToPath.clear();
if (1000 <= count)
CARB_PROFILE_END(1, zoneId);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
}
BucketImpl(const BucketImpl&) = delete;
inline BucketImpl& operator=(const BucketImpl& other) noexcept
{
#if PROFILE_LARGE_BUCKET_COPIES
size_t count = other.elemToPath.size();
carb::profiler::ZoneId zoneId = carb::profiler::kUnknownZoneId;
if (1000 <= count)
zoneId = CARB_PROFILE_BEGIN(1, "Copy Bucket %zu", count);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
scalarAttributeArrays.clear();
scalarAttributeArrays.reserve(other.scalarAttributeArrays.size());
other.scalarAttributeArrays.forEach([this](const AttrName& name, const ScalarAttributeArray &otherArray) {
// construct new array with the current BucketImpl platform, but mimicking the type of otherArray
ScalarAttributeArray *array;
scalarAttributeArrays.allocateEntry(name, &array);
new (array) ScalarAttributeArray(platform, otherArray.type, otherArray.typeinfo);
*array = otherArray;
});
arrayAttributeArrays.clear();
arrayAttributeArrays.reserve(other.arrayAttributeArrays.size());
other.arrayAttributeArrays.forEach([this](const AttrName& name, const ArrayAttributeArray &otherArray) {
// construct new array with the current BucketImpl platform, but mimicking the type of otherArray
ArrayAttributeArray *array;
arrayAttributeArrays.allocateEntry(name, &array);
new (array) ArrayAttributeArray(platform, otherArray.values.type, otherArray.values.typeinfo);
*array = otherArray;
});
elemToPath = other.elemToPath;
listenerIdToChanges.clear();
listenerIdToChanges.reserve(other.listenerIdToChanges.size());
other.listenerIdToChanges.forEach([this](const ListenerId& listener, const Changes &otherChanges) {
Changes* changes;
VALIDATE_TRUE(listenerIdToChanges.allocateEntry(listener, &changes));
static_assert(std::is_copy_constructible<Changes>::value, "Expected listenerIdToChanges values to be copy-constructible");
new (changes) Changes(otherChanges);
});
bucket = other.bucket;
#if PROFILE_LARGE_BUCKET_COPIES
if (1000 <= count)
CARB_PROFILE_END(1, zoneId);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
return *this;
}
BucketImpl(BucketImpl&& other) noexcept = delete;
inline BucketImpl& operator=(BucketImpl&& other) noexcept
{
// this->platform = std::move(b.platform); // intentionally not move-assigning platform (we can't anyways, it's a ref)
this->scalarAttributeArrays = std::move(other.scalarAttributeArrays);
this->arrayAttributeArrays = std::move(other.arrayAttributeArrays);
this->elemToPath = std::move(other.elemToPath);
this->listenerIdToChanges = std::move(other.listenerIdToChanges);
this->bucket = std::move(other.bucket);
return *this;
}
const Bucket& GetBucket() const
{
return bucket;
}
//
// TODO: In the future we should support universal ref +
// move assignments; unfortunately Bucket doesn't follow
// the rule of 5 so that is unavailable to us currently.
//
void SetBucket(const Bucket& _bucket)
{
bucket = _bucket;
}
private:
//
// bucketImpl knows the Bucket it represents
//
Bucket bucket;
};
/**
* @struct BucketIdToImpl
*
* @brief Convenience data structure for quick bucket lookups
*
* @details We want to avoid the cost of hashmap lookups when possible,
* due to the large number of single-element lookups. This class
* creates a static vector to track buckets. It also keeps the array
* as densely packed as possible, while not incurring the cost of moves
*
* @notes 1) NOT threadsafe
*
* @todo possibly store the last and first valid so one can avoid unneeded
* iteration
* @todo provide an iterator
* @todo make deleting buckets move lastEmptySlot back where appropriate so that
* it is always meaningful as "end()"
*/
struct BucketIdToImpl
{
// A reasonable first size for the number of buckets
// If one changed this number, they would have to update
// the constants in the unit tests "Check Bucket Growth"
static const int max_buckets_init = 1024;
/**
* @brief Initialize storage to the minimum size
* We rely on C++ value initialization, which default-initializes the
* std::vector's BucketImpl pointers to null for valid tracking
*
*/
BucketIdToImpl(Platform& platform)
: platform(platform)
, buckets(max_buckets_init)
, lastEmptySlot{ 0 }
{
}
~BucketIdToImpl()
{
clear();
}
/**
* @brief mimic emplace function for stl objects
*
* @details This allows us to do move of buckets after they have been
* created into the storage. We choose for backwards compatibility
* with other APS to return a pair. It takes care of the correct place
* to store the bucket for you to keep the storage as dense as possible.
* This claims the next bucket, and will move out of
* @param bucketImpl a new bucket to be added to the Storage, empty after function call
* due to move.
*
* @return <BucketId, BucketImpl&> the pair representing the Id for lookup of the bucket
* and a reference to the data since it was moved from param
*
*/
std::pair<BucketId, BucketImpl&> emplace(BucketImpl* bucketImpl)
{
CARB_ASSERT(bucketImpl);
BucketId bucketId = ClaimNextOpenBucket();
if (buckets[size_t(bucketId)])
{
platform.allocator.delete_(buckets[size_t(bucketId)]);
}
buckets[size_t(bucketId)] = std::move(bucketImpl);
return std::pair<BucketId, BucketImpl&>(bucketId, *buckets[(size_t)bucketId]);
}
/**
* @brief Erase the specified bucket
*
* @details This actually forces deletion of the object to be deleted, and adds the
* id to the list of free buckets so that it will be recycled before more are
* added to the end
*
* @param id : The id of the bucket to be deleted
*/
void erase(BucketId id)
{
if (size_t(id) < buckets.size() && buckets[size_t(id)])
{
// Ignoring optimization of if last slot is empty
freeSlots.push(id);
platform.allocator.delete_(buckets[size_t(id)]);
buckets[size_t(id)] = nullptr;
}
}
// Find the bucket at the requested slot; if no bucket exists
// then we return a null pointer
/**
* @brief Find the bucket if it exists
*
* @param id : The id of the bucket to be found
*
* @return If the bucket exists a pointer is returned, otherwise a null pointer is returned
*/
BucketImpl* find(BucketId id)
{
if (size_t(id) < buckets.size())
return buckets[size_t(id)];
return nullptr;
}
/**
* @brief Find the bucket if it exists (const)
*
* @param id : The id of the bucket to be found
*
* @return If the bucket exists a const pointer is returned, otherwise a null pointer is returned
*/
const BucketImpl* find(BucketId id) const
{
if (size_t(id) < buckets.size())
return buckets[size_t(id)];
return nullptr;
}
/**
* @brief Clear all the buckets
*
* @details This will force deletion of all the nodes and make the storage appear empty
*
*/
void clear()
{
for (uint64_t i = 0; i < size_t(lastEmptySlot); ++i)
{
if (buckets[i])
{
platform.allocator.delete_(buckets[i]);
buckets[i] = nullptr;
}
}
// no clear in std::queue so we swap with a new one
std::queue<BucketId>().swap(freeSlots);
lastEmptySlot.id = 0;
}
/**
* @brief get the possible end of the storage
*
* @details This will return one past the last claimed id for a bucket; however,
* it should be combined with a validity check, since slots below it may be empty
*
* @return The end of the "allocated bucket" range; entries below it are not guaranteed valid
*/
size_t end() const
{
return size_t(lastEmptySlot);
}
/**
* @brief Support copy assignment.
*
* @note In order for this to be a valid copy it must be followed up
* by a call to PathToAttributesMap::bucketImplCopyArrays to
* correctly copy array-of-array data.
*
* @return Copy constructed buckets, without array-of-arrays set up
*/
BucketIdToImpl& operator=(const BucketIdToImpl& other)
{
// Array of free slots
this->freeSlots = other.freeSlots;
// Track the last empty slot
this->lastEmptySlot = other.lastEmptySlot;
//
// A bucketImpl is a struct that mainly contains a map
// to arrays which are of the data type MirroredArray.
// A MirroredArray has two states
// (1) It contains an array of data
// (2) It contains an array of arrays.
// In the case of (2) the array itself doesn't have enough
// information to make a copy of the array of arrays, so the
// copy constructor is overloaded and the structure around the
// array of arrays is loaded, but the actual copying of that data is
// pushed off to be done by the function BucketImplCopyArrays
// which is a member of PathToAttributesMap which is the only place
// that currently has enough information to make the copy
//
this->buckets.resize(other.buckets.size());
for (size_t i = 0; i < this->buckets.size(); ++i)
{
if (other.buckets[i])
{
if (!this->buckets[i])
{
this->buckets[i] = platform.allocator.new_<BucketImpl>(platform);
}
*this->buckets[i] = *other.buckets[i];
}
else if (this->buckets[i])
{
platform.allocator.delete_(this->buckets[i]);
this->buckets[i] = nullptr;
}
}
return *this;
}
/**
* @brief Resize the internals
*
* @details Note that resize will only grow, calling with a smaller size is a no-op
*
*/
void resize(size_t newSize)
{
if (newSize > buckets.size()) {
buckets.resize(newSize);
}
}
/**
* @brief Claim a bucket by index. The bucket is returned regardless of prior
* validity; the slot is marked valid and internals are updated where needed.
* If an index past the allocated storage is requested, more memory is
* allocated.
*
* @note This should be used sparingly; generally it is intended for
* re-constructing one flat cache from another. If things are done out of
* order it could be expensive, and it is assumed that all external mappings
* are maintained by the claimer.
*
* @param id : The id of the bucket to be claimed
*
* @return A reference to the bucketImpl that was claimed
*/
BucketImpl& claim(BucketId id)
{
// grow if needed
if (size_t(id) >= buckets.size()) {
resize(size_t(id) + 1);
}
// if the bucket is already valid we can just return access to it
if (!buckets[size_t(id)])
{
// otherwise we need to update accordingly
while (lastEmptySlot <= id) {
if (!buckets[size_t(lastEmptySlot)]) {
freeSlots.push(lastEmptySlot);
}
++lastEmptySlot;
}
buckets[size_t(id)] = platform.allocator.new_<BucketImpl>(platform);
}
CARB_ASSERT(buckets[size_t(id)]);
return *buckets[size_t(id)];
}
template<typename CallbackT>
void forEachValidBucket(CallbackT callback) const;
private:
/**
* @brief Get the next open bucket
*
* @note May invalidate references to existing buckets
*
*/
BucketId ClaimNextOpenBucket()
{
BucketId slot = lastEmptySlot;
if (freeSlots.size() != 0)
{
slot = freeSlots.front();
freeSlots.pop();
}
else
{
++lastEmptySlot;
if (size_t(lastEmptySlot) == buckets.size())
{
std::vector<BucketImpl*> newVector;
newVector.resize(buckets.size() * 2);
//
// A bucketImpl is a struct that mainly contains a map
// to arrays which are of the data type MirroredArray.
// A MirroredArray has two states
// (1) It contains an array of data
// (2) It contains an array of arrays.
// In the case of (2) the array itself doesn't have enough
// information to make a copy of the array of arrays, so the
// copy constructor is overloaded and the structure around the
// array of arrays is loaded, but the actual copying of that data is
// pushed off to be done by the function BucketImplCopyArrays
// which is a member of PathToAttributesMap which is the only place
// that currently has enough information to make the copy
//
// Since we cannot guarantee that someone will know to call
// the copyArrays function we enforce that data must be moved here.
//
for (size_t i = 0; i < buckets.size(); ++i)
{
newVector[i] = std::move(buckets[i]);
}
std::swap(newVector, buckets);
}
}
CARB_ASSERT(!buckets[size_t(slot)]);
buckets[size_t(slot)] = platform.allocator.new_<BucketImpl>(platform);
return slot;
}
Platform& platform;
// array of bucket impls
std::vector<BucketImpl*> buckets;
// Array of free slots
std::queue<BucketId> freeSlots;
// Track the last empty slot
BucketId lastEmptySlot;
};
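// Usage sketch (illustrative): slots freed by erase() are recycled before the
// storage grows, keeping ids dense.
// BucketIdToImpl store(platform);
// std::pair<BucketId, BucketImpl&> r = store.emplace(platform.allocator.new_<BucketImpl>(platform));
// store.erase(r.first); // r.first joins the free list and is reused by the next emplace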
// Internally we convert Paths to uint64_t path ids using asInt().
// PathId is the domain of pathToBucketElem, defined below.
using PathId = PathC;
Platform& platform;
// Concretely, FlatCache is the following three maps:
// 1) Each path maps to a bucket, and an SOA index within that bucket.
// This level of indirection allows the user to delete prims and/or
// attributes without creating holes in the SOAs. Whenever the user
// deletes a prim, the cache moves the last SOA element to the deleted
// element, and updates the path to element map of the moved SOA element.
// 2) Buckets (sets of attribute names) map to BucketImpls, defined above.
// This allows the user to quickly get e.g. arrays of all the meshes,
// or all the meshes that have rigid body attributes.
// 3) pxr::TfType names map to TypeInfos, containing attribute type and size in bytes.
HashMap<PathId, std::pair<BucketId, ArrayIndex>, std::hash<PathId>, std::equal_to<PathId>, AllocFunctor, FreeFunctor> pathToBucketElem;
BucketIdToImpl buckets;
std::map<Bucket, BucketId> attrNameSetToBucketId;
// Each listener has its own attrNamesToLog and enableChangeTracking
struct ChangeTrackerConfig
{
set<TokenC> attrNamesToLog;
bool changeTrackingEnabled = true;
};
HashMap<ListenerId, ChangeTrackerConfig, ListenerIdHasher, std::equal_to<ListenerId>, AllocFunctor, FreeFunctor> listenerIdToChangeTrackerConfig;
TypeToInfo typeToInfo;
UsdStageId usdStageId;
bool minimalPopulationDone = false; // 104 only - do not port this forward to 105+
Hierarchy stageHierarchy; // 104 only - do not port this forward to 105+
mutable bool apiLogEnabled = false;
// The rest of PathToAttributesMap is methods
size_t size();
void clear();
void printMirroredArray(const char* const label, const ScalarAttributeArray &array, const size_t* const arrayElemCount) const;
void print() const;
// Void* multiple attribute interface
void getArraysRdC(const void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount);
void getAttributesRdC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount);
void getAttributesRdGpuC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind);
void getArraysWrC(void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount);
void getAttributesWrC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount);
void getAttributesWrGpuC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind);
// Span interface
SpanC getArraySpanC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArraySpanRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArraySpanWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// Void* interface
void addAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value = nullptr);
void addArrayAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount);
void addAttributesToPrim(
const PathC& path, const std::vector<TokenC>& attrNames, const std::vector<TypeC>& typeCs);
void addAttributeC(const PathC& path,
const TokenC& attrName,
NameSuffix suffix,
TypeC type,
const void* value = nullptr);
void addArrayAttributeC(const PathC& path,
const TokenC& attrName,
NameSuffix suffix,
TypeC type,
const void* value,
const size_t arrayElemCount);
void* getArrayC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getAttributeRdC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeWrC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getOrCreateAttributeWrC(const PathC& path, const TokenC& attrName, TypeC type);
// Type safe interface
template <typename T>
void addAttribute(const PathC& path, const TokenC& attrName, TypeC type, const T& value);
template <typename T>
void addSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const T& value);
template <typename T>
T* getArray(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getAttribute(const PathC& path, const TokenC& attrName);
template <typename T>
const T* getArrayRd(const Bucket& bucket, const TokenC& attrName);
template <typename T>
const T* getArrayRd(BucketId bucketId, const TokenC& attrName);
template <typename T>
const T* getAttributeRd(const PathC& path, const TokenC& attrName);
template <typename T>
T* getArrayWr(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getAttributeWr(const PathC& path, const TokenC& attrName);
template <typename T>
T* getAttributeWr(const PathC& path, const TokenC& attrName, NameSuffix suffix);
void removeAttribute(const PathC& path, const TokenC& attrName);
void removeSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix);
/** @brief Destroy all attributes with matching names from prim at given path
*
* @path[in] Path to the prim holding the attribute
* @attrNames[in] Attribute name array
*
*/
void removeAttributesFromPath(const PathC& path, const std::vector<TokenC>& attrNames);
ValidMirrors getAttributeValidBits(
const PathC& path,
const TokenC& attrName,
ArrayAttributeArray::MirroredArrays subArray = ArrayAttributeArray::MirroredArrays::Values) const;
// Accessors for element count of array attributes
size_t* getArrayAttributeSize(const PathC& path, const TokenC& attrName);
const size_t* getArrayAttributeSizeRd(const PathC& path, const TokenC& attrName);
size_t* getArrayAttributeSizeWr(const PathC& path, const TokenC& attrName);
size_t* getArrayAttributeSizes(const Bucket& bucket, const TokenC& attrName);
const size_t* getArrayAttributeSizesRd(const Bucket& bucket, const TokenC& attrName);
size_t* getArrayAttributeSizesWr(const Bucket& bucket, const TokenC& attrName);
SpanC setArrayAttributeSizeAndGet(PathC path, const TokenC& attrName, size_t newSize);
SpanC setArrayAttributeSizeAndGet(BucketId bucketId, size_t elementIndex, const TokenC& attrName, size_t newSize);
// GPU can currently read, but not write, size of arrays
// This is because writing causes array to resize, and that's not currently supported on GPU
const size_t* getArrayAttributeSizeRdGpu(const PathC& path, const TokenC& attrName);
const size_t* getArrayAttributeSizesRdGpu(const Bucket& bucket, const TokenC& attrName);
// Void* CUDA GPU interface
SpanC getAttributeGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
ConstSpanC getAttributeRdGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeWrGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
void* getArrayGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// Span CUDA GPU interface
SpanC getArraySpanGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArraySpanRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArraySpanWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// Type safe CUDA GPU interface
template <typename T>
T* getAttributeGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
const T* getAttributeRdGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
T* getAttributeWrGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
T* getArrayGpu(const Bucket& bucket, const TokenC& attrName);
template <typename T>
const T* getArrayRdGpu(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getArrayWrGpu(const Bucket& bucket, const TokenC& attrName);
// D3D GPU interface
omni::gpucompute::GpuPointer getAttributeD3d(const PathC& path, const TokenC& attrName);
void* getArrayD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// PathC methods
void addPath(const PathC& path, const Bucket& bucket = {});
void renamePath(const PathC& oldPath, const PathC& newPath);
set<AttrNameAndType> getTypes(const PathC& path) const;
size_t getAttributeCount(const PathC& path) const;
TypeC getType(const PathC& path, const TokenC& attrName) const;
void removePath(const PathC& path);
size_t count(const PathC& path) const;
size_t count(const PathC& path, const TokenC& attrName) const;
// Type methods
void addType(TypeC type, Typeinfo typeInfo);
Typeinfo getTypeInfo(TypeC type) const;
// Bucket methods
BucketId addBucket(const Bucket& bucket);
void addAttributeC(
const Bucket& bucket, const TokenC& attrName, TypeC type, const void* value = nullptr);
void addArrayAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount);
template <typename T>
void addAttribute(const Bucket& bucket, const TokenC& attrName, TypeC type, const T& value);
void removeAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type);
void printBucket(const Bucket& bucket) const;
void printBucketName(const Bucket& bucketTypes, BucketId bucketId) const;
void printBucketNames() const;
void printBucketNamesAndTypes() const;
flatcache::set<BucketId> findBuckets(const set<AttrNameAndType>& all,
const set<AttrNameAndType>& any = {},
const set<AttrNameAndType>& none = {}) const;
View getView(const set<AttrNameAndType>& inc, const set<AttrNameAndType>& exc = {});
size_t getElementCount(const Bucket& bucket) const;
const PathC* getPathArray(const Bucket& bucket) const;
TypeC getType(const Bucket& bucket, const TokenC& attrName) const;
/** @brief Destroy all attributes with matching names from a given Bucket - array version of removeAttributeC
*
* @bucket[in] Bucket to remove attributes from
* @attrNames[in] Attribute name array
*
*/
void removeAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames);
// BucketId methods
size_t getElementCount(BucketId bucketId) const;
SpanC getArrayC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArrayRdC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArrayWrC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getOrCreateArrayWrC(
BucketId bucketId, const TokenC& attrName, TypeC type, NameSuffix suffix = NameSuffix::none);
SpanC getArrayGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArrayRdGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArrayWrGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
BucketId getBucketId(const PathC& path) const;
ArrayPointersAndSizesC getArrayAttributeArrayWithSizes(BucketId bucketId, const TokenC& attrName);
ConstArrayPointersAndSizesC getArrayAttributeArrayWithSizesRd(BucketId bucketId, const TokenC& attrName);
ArrayPointersAndSizesC getArrayAttributeArrayWithSizesWr(BucketId bucketId, const TokenC& attrName);
SpanSizeC getArrayAttributeSizes(BucketId bucketId, const TokenC& attrName);
ConstSpanSizeC getArrayAttributeSizesRd(BucketId bucketId, const TokenC& attrName);
ConstSpanSizeC getArrayAttributeSizesRdGpu(BucketId bucketId, const TokenC& attrName);
SpanSizeC getArrayAttributeSizesWr(BucketId bucketId, const TokenC& attrName);
ConstPathCSpan getPathArray(BucketId bucketId) const;
Bucket getNamesAndTypes(BucketId bucketId) const;
// std::vector<Bucket> methods
std::vector<size_t> getElementCounts(const std::vector<Bucket>& buckets) const;
template <typename T>
std::vector<const T*> getArraysRd(const std::vector<Bucket>& buckets, const TokenC& attrName);
template <typename T>
std::vector<T*> getArraysWr(const std::vector<Bucket>& buckets, const TokenC& attrName);
template <typename T>
std::vector<T*> getArrays(const std::vector<Bucket>& buckets, const TokenC& attrName);
// BucketImpl methods
size_t getElementCount(const BucketImpl& bucketImpl) const;
// Allow default construction
PathToAttributesMap(const PlatformId& platformId = PlatformId::Global);
// Disallow copying
PathToAttributesMap(const PathToAttributesMap&) = delete;
// Allow copy assignment. This is used by StageWithHistory
PathToAttributesMap& operator=(const PathToAttributesMap&);
// Allow move construction and assignment
PathToAttributesMap(PathToAttributesMap&& other) noexcept = default;
PathToAttributesMap& operator=(PathToAttributesMap&& other) noexcept = default;
~PathToAttributesMap();
// Methods that are currently used in flatcache.cpp
// TODO: Make private
struct ArrayOfArrayInfo
{
// For each array, the element count requested by the user
size_t* arraySizeArray;
// For each array, the element count allocated on the CPU
MirroredArray* arrayCpuCapacityArray;
// For each array, the element count allocated on the GPU
MirroredArray* arrayGpuCapacityArray;
// For each array, the GPU data
MirroredArray* arrayGpuPtrArray;
};
struct ConstArrayOfArrayInfo
{
// For each array, the element count requested by the user
        const MirroredArray* arraySizeArray;
// For each array, the element count allocated on the CPU
const MirroredArray* arrayCpuCapacityArray;
// For each array, the element count allocated on the GPU
const MirroredArray* arrayGpuCapacityArray;
// For each array, the GPU data
const MirroredArray* arrayGpuPtrArray;
};
// enableCpuWrite() is used in flatcache.cpp, so needs to be public
// TODO: move that code from there to here
void enableCpuWrite(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
// getArrayOfArrayInfo() is used in flatcache.cpp, so needs to be public
// TODO: move that code from there to here
ArrayOfArrayInfo getArrayOfArrayInfo(Typeinfo typeInfo, BucketImpl& bucketImpl, TokenC attrName);
ArrayOfArrayInfo getArrayOfArrayInfo(ArrayAttributeArray &arrayAttributeArray);
ConstArrayOfArrayInfo getArrayOfArrayInfo(Typeinfo typeInfo, const BucketImpl& bucketImpl, TokenC attrName) const;
ConstArrayOfArrayInfo getArrayOfArrayInfo(const ArrayAttributeArray &arrayAttributeArray) const;
void bucketImplCopyScalarAttributeArray(ScalarAttributeArray &dest, const ScalarAttributeArray &src);
void bucketImplCopyArrayAttributeArray(BucketImpl& destBucketImpl, const AttrName& destName, ArrayAttributeArray &dest, const ArrayAttributeArray &src);
void bucketImplCopyArrays(BucketImpl& destBucketImpl,
BucketId destBucketId,
const BucketImpl& srcBucketImpl,
BucketId srcBucketId,
const carb::flatcache::set<AttrNameAndType>& attrFilter = {});
// Serialization
struct Serializer
{
uint8_t *p;
uint8_t *buf;
uint8_t *end;
uint64_t bytesWritten; // increments even if attempts are made to write past end
bool overflowed;
void init(uint8_t *const _buf, uint8_t *const end);
bool writeBytes(const uint8_t *const src, uint64_t size);
bool writeString(const char* const s, const size_t len);
bool writeString(const std::string &s);
template<typename T>
bool write(const T &t);
};
struct Deserializer
{
const uint8_t *p;
const uint8_t *buf;
const uint8_t *end;
uint64_t bytesRead; // increments even if attempts are made to read past end
bool overflowed;
void init(const uint8_t *const _buf, const uint8_t *const end);
bool readBytes(uint8_t *const dst, uint64_t size);
bool readString(std::string &s);
template<typename T>
bool read(T &t);
};
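    // A minimal round-trip sketch for the Serializer/Deserializer pair (the
    // buffer and payload here are hypothetical, and writeString's on-wire
    // layout is an assumption): init() fixes the [buf, end) range, the
    // write/read calls advance p and set overflowed instead of touching
    // memory past end, and bytesWritten/bytesRead keep counting regardless,
    // so a first pass over an empty range can be used to measure the buffer
    // size needed for a second, real pass.
    //
    //     uint8_t buf[256];
    //     Serializer out;
    //     out.init(buf, buf + sizeof(buf));
    //     out.write(uint64_t(1));               // fixed-size POD write
    //     out.writeString(std::string("attr")); // string write (layout assumed)
    //     Deserializer in;
    //     in.init(buf, buf + out.bytesWritten);
    //     uint64_t version = 0;
    //     std::string name;
    //     bool ok = in.read(version) && in.readString(name) && !in.overflowed;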
uint64_t serializeScalarAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out);
bool deserializeScalarAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in);
uint64_t serializeArrayAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ArrayAttributeArray& srcArrayAttributeArray, Serializer &out);
bool deserializeArrayAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in);
PrimBucketListImpl getChanges(ListenerId listener);
void popChanges(ListenerId listener);
private:
// Device is used by getArrayC
enum class Device
{
eCPU = 0,
eCudaGPU = 1,
eD3dVkGPU = 2
};
static inline constexpr ArrayOfArrayInfo ScalarArrayOfArrayInfo()
{
return ArrayOfArrayInfo{ nullptr, nullptr, nullptr, nullptr };
}
// TODO: Now that EnableReadFn and EnableWriteFn can have the same type, should they just be one alias?
using EnableReadFn = void (PathToAttributesMap::*)(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuData);
using EnableWriteFn = void (PathToAttributesMap::*)(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuData);
struct IOConfig
{
EnableReadFn enableRead;
EnableWriteFn enableWrite;
EnableReadFn enableRdPtrForWrite;
Device device;
PtrToPtrKind ptrToPtrKind;
inline IOConfig& withEnableRead(EnableReadFn _enableRead)
{
enableRead = _enableRead;
return *this;
}
inline IOConfig& withEnableWrite(EnableWriteFn _enableWrite)
{
enableWrite = _enableWrite;
return *this;
}
inline IOConfig& withEnableRdPtrForWrite(EnableReadFn _enableRdPtrForWrite)
{
enableRdPtrForWrite = _enableRdPtrForWrite;
return *this;
}
inline IOConfig& withDevice(Device _device)
{
device = _device;
return *this;
}
inline IOConfig& withPtrToPtrKind(PtrToPtrKind _ptrToPtrKind)
{
ptrToPtrKind = _ptrToPtrKind;
return *this;
}
};
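    // The with*() setters above form a small builder, so a call site can take
    // one of the preset configs defined further down and override a single
    // field. A sketch (not a call that appears in this file):
    //
    //     IOConfig io = CudaReadConfig().withPtrToPtrKind(PtrToPtrKind::eGpuPtrToGpuPtr);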
void serializeMirroredArrayMetadata(const AttrName& srcName, MirroredArray &srcValuesArray, Serializer &out);
template<typename ArraysT, typename ArraysMapT>
void deserializeMirroredArrayMetadata(Platform& platform, ArraysMapT& arraysMap, AttrName &destName, Typeinfo *&typeInfo, ArraysT *&destArray, Deserializer &in);
BucketImpl& addAttributeInternal(BucketImpl& prevBucketImpl, const Bucket& bucket, const TokenC& attrName, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount);
void fillAttributeInternal(BucketImpl& bucketImpl, const AttrName& name, const size_t startIndex, const size_t endIndex, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount, MirroredArray *const valuesArray, ArrayAttributeArray *const arrayAttributeArray);
void addAttributeInternal(const PathC& path, const TokenC& attrNameC, const NameSuffix nameSuffix, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount);
bool findArrayAttributeArrayForPath(const PathC& path,
const TokenC& attrName,
size_t& outElementIndex,
BucketImpl*& outBucketImpl,
ArrayAttributeArray*& outArrayAttributeArray);
bool findArrayAttributeArrayForBucketId(const BucketId bucketId,
const TokenC& attrName,
BucketImpl*& outBucketImpl,
ArrayAttributeArray*& outArrayAttributeArray);
void allocElement(ScalarAttributeArray &scalar);
void allocElement(ArrayAttributeArray &vector);
size_t allocElement(BucketImpl& bucketImpl);
void allocElementForMove(BucketImpl& srcBucketImpl, const ArrayOfArrayInfo &srcAoaInfo, const AttrName& name, MirroredArray &destArray, MirroredArray *const srcArray);
size_t allocElementForMove(BucketImpl& destBucketImpl, BucketImpl& srcBucketImpl, const PathC& path);
void addElementToTrackers(size_t elemIndex, BucketImpl& bucketImpl);
void makeSrcValidIfDestValid(MirroredArray& srcArray,
BucketImpl& srcBucketImpl,
const ArrayOfArrayInfo& srcAoaInfo,
const MirroredArray& destArray,
const AttrName& name);
void moveElementBetweenBuckets(const PathC& path, BucketId destBucketId, BucketId srcBucketId, const Bucket& destBucket);
void moveElementScalarData(ScalarAttributeArray &destArray, const size_t destElemIndex, const ScalarAttributeArray &srcArray, const size_t srcElemIndex);
void moveElementArrayData(ArrayAttributeArray &destArray, const size_t destElemIndex, const ArrayAttributeArray &srcArray, const size_t srcElemIndex);
void moveElement(BucketImpl& destBucket, size_t destElemIndex, BucketImpl& srcBucket, size_t srcElemIndex);
void destroyElement(BucketId bucketId, size_t elemIndex, bool destroyDataPointedTo);
ArrayAndDirtyIndices getArraySpanC(BucketId bucketId, TokenC attrName, const IOConfig &io, NameSuffix suffix = NameSuffix::none);
ArrayAndDirtyIndices getArraySpanC(MirroredArray& array, const AttrName& name, const ArrayOfArrayInfo& aoa, BucketImpl& bucketImpl, const IOConfig &io);
void enableCpuReadImpl(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData,
bool printWarnings = true);
void enableCpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableCpuReadIfValid(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableGpuWrite(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableD3dGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* gpuPointerArray);
void enableD3dGpuWrite(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
static inline constexpr IOConfig CpuReadConfig()
{
return IOConfig{
&PathToAttributesMap::enableCpuRead, // enableRead
nullptr, // enableWrite
nullptr, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CpuWriteConfig()
{
return IOConfig{
nullptr, // enableRead
&PathToAttributesMap::enableCpuWrite, // enableWrite
&PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CpuReadWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableCpuRead, // enableRead
&PathToAttributesMap::enableCpuWrite, // enableWrite
&PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
// TODO: This probably needs to go away, it only exists to turn off "printWarnings"
static inline constexpr IOConfig CpuReadIfValidWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableCpuReadIfValid, // enableRead
&PathToAttributesMap::enableCpuWrite, // enableWrite
&PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CudaReadConfig()
{
return IOConfig{
&PathToAttributesMap::enableGpuRead, // enableRead
nullptr, // enableWrite
nullptr, // enableRdPtrForWrite
Device::eCudaGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CudaWriteConfig()
{
return IOConfig{
nullptr, // enableRead
&PathToAttributesMap::enableGpuWrite, // enableWrite
&PathToAttributesMap::enableGpuRead, // enableRdPtrForWrite
Device::eCudaGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CudaReadWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableGpuRead, // enableRead
&PathToAttributesMap::enableGpuWrite, // enableWrite
&PathToAttributesMap::enableGpuRead, // enableRdPtrForWrite
Device::eCudaGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig D3dVkReadConfig()
{
return IOConfig{
&PathToAttributesMap::enableD3dGpuRead, // enableRead
nullptr, // enableWrite
nullptr, // enableRdPtrForWrite
Device::eD3dVkGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig D3dVkWriteConfig()
{
return IOConfig{
nullptr, // enableRead
&PathToAttributesMap::enableD3dGpuWrite, // enableWrite
&PathToAttributesMap::enableD3dGpuRead, // enableRdPtrForWrite
Device::eD3dVkGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig D3dVkReadWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableD3dGpuRead, // enableRead
&PathToAttributesMap::enableD3dGpuWrite, // enableWrite
&PathToAttributesMap::enableD3dGpuRead, // enableRdPtrForWrite
Device::eD3dVkGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
std::tuple<bool, BucketId, size_t> getPresentAndBucketAndElement(const PathC& path) const;
SpanC getArrayElementPtr(SpanC array, size_t bucketElement) const;
ConstSpanC getArrayElementPtr(ConstSpanC array, size_t bucketElement) const;
void destructiveResizeIfNecessary(uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount);
void destructiveResizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx);
void resizeIfNecessary(uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, TypeC type);
void resizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx);
void allocGpuMemIfNecessary(PathToAttributesMap::MirroredArray& array,
size_t byteCount,
size_t elemSize,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx);
std::pair<BucketId, BucketImpl&> findOrCreateBucket(const Bucket& bucket);
void eraseBucket(const Bucket& bucket);
BucketId findBucketId(const Bucket& bucket);
std::tuple<BucketId, ArrayIndex> getBucketAndArrayIndex(const PathC& path) const;
std::tuple<BucketId, ArrayIndex> addAttributeGetBucketAndArrayIndex(
const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type);
void addAttributesToBucket(
const PathC& path,
const std::vector<TokenC>& attrNames,
const std::vector<TypeC>& typeCs);
void setArrayDirty(ArrayAndDirtyIndices& arrayAndDirtyIndices);
void setArrayElementDirty(ArrayAndDirtyIndices& arrayAndDirtyIndices, size_t elemIndex);
BucketImpl& addAttributeC(BucketImpl& bucketImpl,
const Bucket& bucket,
const TokenC& attrName,
TypeC ctype,
const void* value = nullptr);
BucketImpl& addArrayAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount);
void checkInvariants();
    /**
     * @brief Internal debug function
     *
     * @details Loops over all the array-valued attributes checking that the
     * invariants hold. Currently it enforces only:
     * (I1) if the requested element count and the allocated CPU capacity
     * match, a non-empty array must have a non-null CPU data pointer
     *
     * @return true if any invariant violation was found, false otherwise
     */
bool __validateArrayInvariants() const;
};
// PathToAttributesMap doesn't depend on USD for tokens or paths
// However, it's useful to be able to see the USD text representation of tokens and
// paths when debugging. Set ENABLE_USD_DEBUGGING to 1 to enable that.
// Then use toTfToken() to convert TokenC to pxr::TfToken
inline const pxr::TfToken& toTfToken(const TokenC& token)
{
return reinterpret_cast<const pxr::TfToken&>(token);
}
inline const pxr::SdfPath& toSdfPath(const PathC& path)
{
return reinterpret_cast<const pxr::SdfPath&>(path);
}
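// For example, in a debugger watch window (the token and path values shown
// are hypothetical):
//     toTfToken(attrName).GetText()  ->  "points"
//     toSdfPath(path).GetText()      ->  "/World/Cube"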
// Query result
struct View
{
PathToAttributesMap* path2attrsMap;
std::vector<Bucket> buckets;
std::vector<size_t> bucketElemCounts;
};
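// A sketch of consuming a View returned by getView() (the map name, query
// set construction, and processBucket callee are hypothetical): buckets and
// bucketElemCounts are parallel arrays with one entry per matching bucket.
//
//     set<AttrNameAndType> inc;
//     inc.insert(AttrNameAndType(Type(BaseDataType::eFloat, 3, 0), pointsToken));
//     View v = cache.getView(inc);
//     for (size_t i = 0; i != v.buckets.size(); ++i)
//         processBucket(v.buckets[i], v.bucketElemCounts[i]);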
using BucketImpl = PathToAttributesMap::BucketImpl;
// The rest of this file is method definitions
// Returns the number of Paths known to the cache
inline size_t PathToAttributesMap::size()
{
return pathToBucketElem.size();
}
// Delete all data in the cache, meaning all the buckets and the map
// from paths to buckets.
inline void PathToAttributesMap::clear()
{
buckets.clear();
pathToBucketElem.clear();
attrNameSetToBucketId.clear();
}
template<typename CallbackT>
inline void PathToAttributesMap::BucketIdToImpl::forEachValidBucket(CallbackT callback) const
{
BucketId id{ 0 };
for (size_t i = 0; i < buckets.size(); ++i, ++id)
{
if (buckets[i])
{
callback(id, *buckets[i]);
}
}
}
inline void PathToAttributesMap::printMirroredArray(const char* const label, const ScalarAttributeArray &array, const size_t* const arrayElemCount) const
{
auto printValue = [](const uint8_t *const data, const size_t size)
{
if (!data)
{
printf("<nullptr>");
}
else
{
if (size <= sizeof(uint8_t))
{
printf("u8=%u, d8=%d, c=%c", *data, *(const int8_t*)data, *(const char*)data);
}
else if (size <= sizeof(uint16_t))
{
printf("u16=%u, d16=%d", *(const uint16_t*)data, *(const int16_t*)data);
}
else if (size <= sizeof(uint32_t))
{
printf("u32=%u, d32=%d, float=%f", *(const uint32_t*)data, *(const int32_t*)data, *(const float*)data);
}
else if (size <= sizeof(uint64_t))
{
printf("u64=%" PRIu64 ", d64=%" PRId64 ", double=%f, ptr=0x%p", *(const uint64_t*)data, *(const int64_t*)data, *(const double*)data, *(void**)data);
}
else
{
printf("\n");
for (size_t i = 0; i < size; i += 16)
{
printf(" %06zx: ", i);
for (size_t j = 0; j < 16; ++j)
{
if (i + j < size)
{
printf("%02x ", data[i + j]);
}
else
{
printf(" ");
}
}
printf(" ");
for (size_t j = 0; j < 16; j++)
{
if (i + j < size)
{
printf("%c", isprint(data[i + j]) ? data[i + j] : '.');
}
}
printf("\n");
}
}
}
};
printf(" %s (type %d)[count %zu]:\n", label, array.type.type, array.count);
const Typeinfo &typeinfo = array.typeinfo;
const size_t elemSize = typeinfo.size;
printf(" cpuValid=%d 0x%p\n", array.cpuValid, array.cpuData());
if (array.cpuValid)
{
for (size_t elem = 0; elem < array.count; ++elem)
{
printf(" [%5zu]: ", elem);
const uint8_t *const elemData = array.cpuData() + elem * elemSize;
printf("0x%p ", elemData);
if (arrayElemCount)
{
CARB_ASSERT(typeinfo.isArray);
const uint8_t* const base = *((const uint8_t **)elemData);
printf(" => 0x%p", base);
for (size_t i = 0; i < arrayElemCount[elem]; ++i)
{
printf("\n [%5zu]: ", i);
const uint8_t* const arrayData = base ? base + i * typeinfo.arrayElemSize : nullptr;
printValue(arrayData, typeinfo.arrayElemSize);
}
}
else
{
printValue(elemData, elemSize);
}
printf("\n");
}
}
printf(" gpuValid=%d 0x%p\n", array.gpuValid, array.gpuArray);
printf(" usdValid=%d\n", array.usdValid);
}
// Print the cache: the paths each bucket contains, the attributes they map
// to, and (via printMirroredArray) the current attribute values
inline void PathToAttributesMap::print() const
{
auto va = [](auto ...params) -> const char* {
static char tmp[1024];
#ifdef _WIN32
_snprintf_s(tmp, sizeof(tmp), params...);
#else
snprintf(tmp, sizeof(tmp), params...);
#endif
        return tmp;
};
std::cout << "(== PathToAttributesMap::print() begin ==)\n";
buckets.forEachValidBucket([this, va](const BucketId bucketId, const BucketImpl& bucketImpl) {
printf("bucket [%zu]:\n", size_t(bucketId));
if (!bucketImpl.elemToPath.size())
{
printf(" <no elements>\n");
}
else
{
for (size_t elem = 0; elem < bucketImpl.elemToPath.size(); ++elem)
{
printf(" elem [%5zu]: \"%s\"\n", elem, bucketImpl.elemToPath[elem].GetText());
}
}
if (bucketImpl.scalarAttributeArrays.empty() && bucketImpl.arrayAttributeArrays.empty())
{
printf(" <no attributes>\n");
}
else
{
bucketImpl.scalarAttributeArrays.forEach([this, &va](const AttrName& name, const ScalarAttributeArray &array) {
printMirroredArray(va("%s \"%s\"", "sattr", toTfToken(name.name).GetText()), array, nullptr);
});
bucketImpl.arrayAttributeArrays.forEach([this, &va](const AttrName& name, const ArrayAttributeArray &array) {
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "values"), array.values, (const size_t*)array.cpuElemCounts.cpuData());
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "elemCounts"), array.elemCounts, nullptr);
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "cpuElemCounts"), array.cpuElemCounts, nullptr);
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "gpuElemCounts"), array.gpuElemCounts, nullptr);
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "gpuPtrs"), array.gpuPtrs, nullptr);
});
}
});
std::cout << "(== PathToAttributesMap::print() end ==)\n\n";
}
#define ENABLE_LOG 0
inline void log(const char* format, ...)
{
#if ENABLE_LOG
va_list args;
va_start(args, format);
vprintf(format, args);
va_end(args);
#endif
}
inline void PathToAttributesMap::addType(TypeC type, Typeinfo typeInfo)
{
Typeinfo *v;
typeToInfo.allocateEntry(type, &v);
*v = typeInfo;
}
inline Typeinfo PathToAttributesMap::getTypeInfo(TypeC type) const
{
const Typeinfo* typeinfo;
if (typeToInfo.find(type, &typeinfo))
{
return *typeinfo;
}
else
{
return Typeinfo();
}
}
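// Callers typically branch on the returned Typeinfo. A sketch (assuming, as
// the code below does, that a default-constructed Typeinfo from the
// not-found path has size 0 and so reads as a tag):
//
//     const Typeinfo info = getTypeInfo(ctype);
//     if (info.isArray)        { /* array-valued: five MirroredArrays per attribute */ }
//     else if (info.size == 0) { /* tag: no per-element storage */ }
//     else                     { /* scalar attribute of info.size bytes per element */ }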
inline BucketId PathToAttributesMap::addBucket(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
{
// Create bucket
auto bucketIdAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
auto bucketAndId = attrNameSetToBucketId.emplace(bucket, bucketIdAndImpl.first);
const Bucket& addedBucket = bucketAndId.first->first;
BucketImpl& bucketImpl = bucketIdAndImpl.second;
bucketImpl.SetBucket(bucket);
// Make array for each type
for (const AttrNameAndType& b : addedBucket)
{
const Type attrType = b.type;
const Token& attrName = b.name;
const NameSuffix& suffix = b.suffix;
const TypeC attrTypeC = TypeC(attrType);
const Typeinfo* typeinfo;
if (typeToInfo.find(attrTypeC, &typeinfo))
{
AttrName name{ attrName, suffix };
if (typeinfo->isArray)
{
ArrayAttributeArray* ptr;
bucketImpl.arrayAttributeArrays.allocateEntry(std::move(name), &ptr);
new (ptr) ArrayAttributeArray(platform, attrTypeC, *typeinfo);
}
else
{
ScalarAttributeArray* ptr;
bucketImpl.scalarAttributeArrays.allocateEntry(std::move(name), &ptr);
new (ptr) ScalarAttributeArray(platform, attrTypeC, *typeinfo);
}
}
else
{
std::cout << "Error: Typeinfo for " << attrType << " not found. Please add it using addType()." << std::endl;
}
}
return bucketIdAndImpl.first;
}
else
{
return iter->second;
}
}
// Multiple attribute methods
inline void PathToAttributesMap::getAttributesRdC(const void** attrsOut,
const PathC* paths,
const TokenC* attrNames,
size_t attrCount)
{
// TODO: make optimized version instead of calling getAttributeRdC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeRdC(paths[i], attrNames[i]).ptr;
}
}
inline void PathToAttributesMap::getAttributesRdGpuC(const void** attrsOut,
const PathC* paths,
const TokenC* attrNames,
size_t attrCount,
PtrToPtrKind ptrToPtrKind)
{
// TODO: make optimized version instead of calling getAttributeRdGpuC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeRdGpuC(paths[i], attrNames[i], ptrToPtrKind).ptr;
}
}
inline void PathToAttributesMap::getArraysRdC(const void** attrsOut,
const Bucket& bucket,
const TokenC* attrNames,
size_t attrCount)
{
// TODO: make optimized version instead of calling getArrayRdC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getArrayRdC(bucket, attrNames[i]);
}
}
inline void PathToAttributesMap::getAttributesWrC(void** attrsOut,
const PathC& path,
const TokenC* attrNames,
size_t attrCount)
{
// TODO: make optimized version instead of calling getAttributeWrC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeWrC(path, attrNames[i]).ptr;
}
}
inline void PathToAttributesMap::getAttributesWrGpuC(void** attrsOut,
const PathC& path,
const TokenC* attrNames,
size_t attrCount,
PtrToPtrKind ptrToPtrKind)
{
// TODO: make optimized version instead of calling getAttributeWrGpuC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeWrGpuC(path, attrNames[i], ptrToPtrKind).ptr;
}
}
inline void PathToAttributesMap::getArraysWrC(void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount)
{
// TODO: make optimized version instead of calling getArrayWrC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getArrayWrC(bucket, attrNames[i]);
}
}
// Algorithm:
// Check whether bucket already has a bucketId
// If it does:
// Check whether bucketId has a bucketImpl
// If it does:
// return (bucketId, bucketImpl)
// Else:
// Print error message
// return (bucketId, empty bucketImpl)
// Else:
// Allocate a bucketId
// attrNameSetToBucketId += (bucket->bucketId)
// buckets += (bucketId->empty bucketImpl)
//
inline std::pair<BucketId, BucketImpl&> PathToAttributesMap::findOrCreateBucket(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool foundBucketAndId = (iter != attrNameSetToBucketId.end());
BucketId bucketId;
if (foundBucketAndId)
{
bucketId = iter->second;
auto implPtr = buckets.find(bucketId);
if (implPtr)
{
return { bucketId, *implPtr };
}
else
{
// This is an error, but make an impl so that we can return gracefully
CARB_LOG_ERROR("BucketId->impl not found");
// Allocate an impl and id->impl mapping and then set the
// attrNameSetToBucketId to the slot of the new impl
auto idAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
iter->second = idAndImpl.first;
idAndImpl.second.SetBucket(bucket);
return idAndImpl;
}
}
// Allocate an impl and place in vector
auto idAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
// Store bucket->Id mapping
attrNameSetToBucketId.emplace(bucket, idAndImpl.first);
idAndImpl.second.SetBucket(bucket);
return idAndImpl;
}
inline void PathToAttributesMap::eraseBucket(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool foundBucketAndId = (iter != attrNameSetToBucketId.end());
BucketId bucketId;
if (foundBucketAndId)
{
bucketId = iter->second;
auto implPtr = buckets.find(bucketId);
if (implPtr)
{
buckets.erase(bucketId);
}
else
{
CARB_LOG_ERROR("BucketId->impl not found");
}
attrNameSetToBucketId.erase(bucket);
}
else
{
// Nothing to do
}
}
// Add an attribute to all elements of a bucket
// Note that this might cause a merge with an existing bucket
//
// Here are the maps we have to update:
// pathToBucketElem :: path -> (bucketId, arrayIndex)
// buckets :: bucketId -> bucketImpl
// attrNameSetToBucketId :: bucket-> bucketId
//
inline BucketImpl& PathToAttributesMap::addAttributeC(BucketImpl& bucketImpl,
const Bucket& bucket,
const TokenC& attrName,
TypeC ctype,
const void* value)
{
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
if (typeinfo.isArray && value)
{
CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
return bucketImpl;
}
return addAttributeInternal(bucketImpl, bucket, attrName, ctype, value, typeinfo, 0);
}
inline BucketImpl& PathToAttributesMap::addArrayAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount)
{
CARB_ASSERT(!arrayElemCount || value);
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
return addAttributeInternal(bucketImpl, bucket, attrName, ctype, value, typeinfo, arrayElemCount);
}
// Add an attribute to all elements of a bucket
inline void PathToAttributesMap::addAttributeC(
const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value)
{
const auto iter = attrNameSetToBucketId.find(bucket);
if (iter != attrNameSetToBucketId.end())
{
const BucketId bucketId = iter->second;
BucketImpl *const implPtr = buckets.find(bucketId);
if (implPtr)
{
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
if (typeinfo.isArray && value)
{
CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
return;
}
addAttributeInternal(*implPtr, bucket, attrName, ctype, value, typeinfo, 0);
}
}
}
inline void PathToAttributesMap::addArrayAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount)
{
CARB_ASSERT(!arrayElemCount || value);
const auto iter = attrNameSetToBucketId.find(bucket);
if (iter != attrNameSetToBucketId.end())
{
const BucketId bucketId = iter->second;
BucketImpl *const implPtr = buckets.find(bucketId);
if (implPtr)
{
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
addAttributeInternal(*implPtr, bucket, attrName, ctype, value, typeinfo, arrayElemCount);
}
}
}
template <typename T>
void PathToAttributesMap::addAttribute(
const Bucket& bucket, const TokenC& attrName, TypeC type, const T& value)
{
APILOGGER("addAttribute", apiLogEnabled, attrName);
// TODO: check that type is compatible
return addAttributeC(bucket, attrName, type, &value);
}
inline size_t PathToAttributesMap::getElementCount(const BucketImpl& bucketImpl) const
{
return bucketImpl.elemToPath.size();
}
inline size_t PathToAttributesMap::getElementCount(BucketId bucketId) const
{
const auto implPtr = buckets.find(bucketId);
if (implPtr)
{
return implPtr->elemToPath.size();
}
return 0;
}
inline size_t PathToAttributesMap::getElementCount(const Bucket& bucket) const
{
const auto iter = attrNameSetToBucketId.find(bucket);
if (iter != attrNameSetToBucketId.end())
{
BucketId bucketId = iter->second;
return getElementCount(bucketId);
}
return 0;
}
inline PathToAttributesMap::ArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(Typeinfo typeInfo,
BucketImpl& bucketImpl,
TokenC attrName)
{
if (typeInfo.isArray)
{
ArrayAttributeArray *array;
if (bucketImpl.arrayAttributeArrays.find(AttrName{ attrName, NameSuffix::none }, &array))
{
return getArrayOfArrayInfo(*array);
}
}
return { nullptr, nullptr, nullptr, nullptr };
}
inline PathToAttributesMap::ArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(ArrayAttributeArray &arrayAttributeArray)
{
MirroredArray *const arraySizeArray = &arrayAttributeArray.elemCounts;
MirroredArray *const arrayCpuCapacityArray = &arrayAttributeArray.cpuElemCounts;
MirroredArray *const arrayGpuCapacityArray = &arrayAttributeArray.gpuElemCounts;
MirroredArray *const arrayGpuPtrArray = &arrayAttributeArray.gpuPtrs;
return { (size_t*)arraySizeArray->cpuData(), arrayCpuCapacityArray, arrayGpuCapacityArray, arrayGpuPtrArray };
}
inline PathToAttributesMap::ConstArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(Typeinfo typeInfo,
const BucketImpl& bucketImpl,
TokenC attrName) const
{
if (typeInfo.isArray)
{
const ArrayAttributeArray *array;
if (bucketImpl.arrayAttributeArrays.find(AttrName{ attrName, NameSuffix::none }, &array))
{
return getArrayOfArrayInfo(*array);
}
}
return { nullptr, nullptr, nullptr, nullptr };
}
inline PathToAttributesMap::ConstArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(const ArrayAttributeArray &arrayAttributeArray) const
{
const MirroredArray *const arraySizeArray = &arrayAttributeArray.elemCounts;
const MirroredArray *const arrayCpuCapacityArray = &arrayAttributeArray.cpuElemCounts;
const MirroredArray *const arrayGpuCapacityArray = &arrayAttributeArray.gpuElemCounts;
const MirroredArray *const arrayGpuPtrArray = &arrayAttributeArray.gpuPtrs;
return { arraySizeArray, arrayCpuCapacityArray, arrayGpuCapacityArray, arrayGpuPtrArray };
}
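// In both the const and non-const variants, the returned fields mirror the
// per-element bookkeeping of an ArrayAttributeArray: elemCounts holds each
// inner array's user-requested size, cpuElemCounts and gpuElemCounts hold
// the capacities currently allocated on each device, and gpuPtrs holds the
// device pointer to each inner array's data.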
inline std::vector<size_t> PathToAttributesMap::getElementCounts(const std::vector<Bucket>& buckets) const
{
size_t bucketCount = buckets.size();
std::vector<size_t> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getElementCount(buckets[i]);
}
return retval;
}
inline void PathToAttributesMap::addElementToTrackers(size_t elemIndex, BucketImpl& bucketImpl)
{
// Update change trackers
// We allocate them lazily, so we have to iterate over listenerIdToChangeTrackerConfig
// then allocate bucketImpl.listenerIdToChanges if necessary
listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &elemIndex](ListenerId& listenerId, ChangeTrackerConfig& config) {
if (config.changeTrackingEnabled)
{
// Allocate changes if necessary
Changes* changes;
if (bucketImpl.listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes();
}
changes->addNewPrim(elemIndex);
}
});
}
inline void PathToAttributesMap::allocElement(ScalarAttributeArray &scalar)
{
const size_t allocSize = scalar.typeinfo.size;
const size_t newSize = scalar.size() + allocSize;
scalar.resize(newSize);
// Only resize GPU mirror if it was previously allocated
if (scalar.gpuCapacity != 0)
{
scalar.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, newSize, allocSize);
}
scalar.count++;
}
inline void PathToAttributesMap::allocElement(ArrayAttributeArray &arrayAttributeArray)
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
allocElement(arrayAttributeArray.values);
allocElement(arrayAttributeArray.elemCounts);
allocElement(arrayAttributeArray.cpuElemCounts);
allocElement(arrayAttributeArray.gpuElemCounts);
allocElement(arrayAttributeArray.gpuPtrs);
// For array-valued attributes, initialize CPU and GPU element counts
    reinterpret_cast<size_t*>(arrayAttributeArray.elemCounts.cpuData())[arrayAttributeArray.elemCounts.count - 1] = 0;
arrayAttributeArray.elemCounts.cpuValid = true;
reinterpret_cast<size_t*>(arrayAttributeArray.cpuElemCounts.cpuData())[arrayAttributeArray.cpuElemCounts.count - 1] = 0;
arrayAttributeArray.cpuElemCounts.cpuValid = true;
reinterpret_cast<size_t*>(arrayAttributeArray.gpuElemCounts.cpuData())[arrayAttributeArray.gpuElemCounts.count - 1] = 0;
arrayAttributeArray.gpuElemCounts.cpuValid = true;
}
inline size_t PathToAttributesMap::allocElement(BucketImpl& bucketImpl)
{
// I moved this here to support old-style ArrayBase::resize
// TODO: Now that ArrayBase is gone, check whether we can move it back
const size_t element = bucketImpl.elemToPath.size();
bucketImpl.elemToPath.emplace_back(); // Allocate an empty path, it gets set later
bucketImpl.scalarAttributeArrays.forEach([this, &bucketImpl](const AttrName& name, ScalarAttributeArray &array) {
allocElement(array);
CARB_UNUSED(bucketImpl);
CARB_ASSERT(array.count == bucketImpl.elemToPath.size());
});
bucketImpl.arrayAttributeArrays.forEach([this, &bucketImpl](const AttrName& name, ArrayAttributeArray &array) {
allocElement(array);
CARB_UNUSED(bucketImpl);
CARB_ASSERT(array.values.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.elemCounts.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.cpuElemCounts.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.gpuElemCounts.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.gpuPtrs.count == bucketImpl.elemToPath.size());
});
addElementToTrackers(element, bucketImpl);
return element;
}
// CPU and GPU valid bits are per SoA array, not per-prim per-attribute.
// Suppose we have a prim with attr whose GPU mirror is not valid, and we want
// to add it to a bucket that has a valid GPU mirror of that attribute. What
// should we set the bucket array's gpuValid to after the add?
//
// Option 1: set bucket's gpuValid to false.
// If cpuValid were true for the bucket, then this would be inefficient but
// correct. But, if cpuValid were false, then we'd have to copy all the bucket's
// data from GPU to CPU to avoid invalidating the only valid copy of the data.
// That would be very inefficient for a bucket with a lot of prims, or an
// array of array-valued attributes.
//
// Option 2: set bucket's gpuValid to true.
// For the bucket plus our new element to be gpuValid, we need to make the new
// element gpuValid by copying it from CPU to GPU.
//
// We've chosen Option 2, as it is the more efficient, and
// makeSrcValidIfDestValid implements it.
// The explanation above was for GPU mirrors, but it applies equally to CPU.
//
// We are changing the srcArray mirrors to match destArray, so
// counterintuitively destArray is const and srcArray is not.
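// Concrete example of Option 2 (a sketch; the attribute name is
// hypothetical): a prim whose "points" data is cpuValid-only moves into a
// bucket whose "points" array is gpuValid-only. Rather than clearing the
// bucket's gpuValid bit, which could later force a full GPU->CPU copy of
// every other prim's data, we upload just the incoming prim's data so the
// merged array stays gpuValid.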
inline void PathToAttributesMap::makeSrcValidIfDestValid(MirroredArray& srcArray,
BucketImpl& srcBucketImpl,
const ArrayOfArrayInfo& srcAoaInfo,
const MirroredArray& destArray,
const AttrName& name)
{
bool srcCpuValid = srcArray.cpuValid;
bool srcGpuValid = srcArray.gpuValid;
bool destCpuValid = destArray.cpuValid;
bool destGpuValid = destArray.gpuValid;
if (srcCpuValid && !srcGpuValid && destGpuValid)
{
// Possible states:
// srcCpu srcGpu destCpu destGpu
// 1 0 0 1
// 1 0 1 1
// With a valid CPU source, this will copy data to the GPU to make it valid
// We don't set dirty indices here because this method gives read-only access
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaReadConfig());
// srcCpu srcGpu destCpu destGpu
// 1 1 0 1
// 1 1 1 1
}
else if (!srcCpuValid && !srcGpuValid && !destCpuValid && destGpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 0 0 1
// Without a valid CPU source, just allocate memory so it can be "valid" even if not initialized
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 0 1 0 1
}
else if (!srcCpuValid && srcGpuValid && destCpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 1 1 0
// 0 1 1 1
// With a valid GPU source, this will copy data back to the CPU to make it valid
// We don't set dirty indices here because this method gives read-only access
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuReadConfig());
// srcCpu srcGpu destCpu destGpu
// 1 1 1 0
// 1 1 1 1
}
else if (!srcCpuValid && !srcGpuValid && destCpuValid && !destGpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 0 1 0
// Without a valid GPU source, just allocate memory so it can be "valid" even if not initialized
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 1 0 1 0
}
else if (!srcCpuValid && !srcGpuValid && destCpuValid && destGpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 0 1 1
// Without a valid GPU source, just allocate memory so it can be "valid" even if not initialized
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 0 1 1 1
// This one clears gpuValid, because we assume that the user is going to write to it
// But, we're not passing the allocated pointer to the user so...
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 1 0 1 1
        // ...we can safely set gpuValid to true
srcArray.gpuValid = true;
// srcCpu srcGpu destCpu destGpu
// 1 1 1 1
}
}
inline void PathToAttributesMap::allocElementForMove(BucketImpl& srcBucketImpl, const ArrayOfArrayInfo &srcAoaInfo, const AttrName& name, MirroredArray &destArray, MirroredArray *const srcArray)
{
bool srcGpuAlloced = false;
if (srcArray)
{
makeSrcValidIfDestValid(*srcArray, srcBucketImpl, srcAoaInfo, destArray, name);
srcGpuAlloced = (srcArray->gpuCapacity != 0);
}
if (srcArray)
{
if (destArray.type != srcArray->type)
{
if (destArray.typeinfo.size != srcArray->typeinfo.size)
{
CARB_LOG_ERROR_ONCE("PathToAttributesMap (%p) contains attributes with duplicate name \"%s\" with different types and different per-element sizes (%zu vs %zu). Data will almost certainly become corrupted during request to move elements between buckets!", this, toTfToken(name.name).GetString().c_str(), destArray.typeinfo.size, srcArray->typeinfo.size);
}
else
{
CARB_LOG_WARN_ONCE("PathToAttributesMap (%p) contains attributes with duplicate name \"%s\" with different types but same per-element size. Data may become corrupted during request to move elements between buckets!", this, toTfToken(name.name).GetString().c_str());
}
}
}
const size_t allocSize = destArray.typeinfo.size;
const size_t newSize = destArray.size() + allocSize;
destArray.resize(newSize);
const bool destGpuAlloced = (destArray.gpuCapacity != 0);
if (srcGpuAlloced || destGpuAlloced)
{
destArray.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, destArray.size(), destArray.typeinfo.size);
}
destArray.count++;
}
// When moving elements between buckets we want to only allocate GPU storage if
// the source had a valid GPU mirror.
inline size_t PathToAttributesMap::allocElementForMove(BucketImpl& destBucketImpl,
BucketImpl& srcBucketImpl,
const PathC& path)
{
const size_t element = destBucketImpl.elemToPath.size();
destBucketImpl.elemToPath.emplace_back(); // Allocate an empty path, it gets set later
// Only allocate dest GPU mirror if src has GPU mirror
destBucketImpl.scalarAttributeArrays.forEach([this, &srcBucketImpl](const AttrName& name, ScalarAttributeArray &array) {
ScalarAttributeArray *srcArray = srcBucketImpl.scalarAttributeArrays.find(name, &srcArray) ? srcArray : nullptr;
const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo();
allocElementForMove(srcBucketImpl, aoa, name, array, srcArray);
});
destBucketImpl.arrayAttributeArrays.forEach([this, &srcBucketImpl](const AttrName& name, ArrayAttributeArray &array) {
ArrayAttributeArray *srcArray = srcBucketImpl.arrayAttributeArrays.find(name, &srcArray) ? srcArray : nullptr;
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
const ArrayOfArrayInfo aoa = srcArray ? getArrayOfArrayInfo(*srcArray) : ScalarArrayOfArrayInfo();
allocElementForMove(srcBucketImpl, aoa, name, array.values, srcArray ? &srcArray->values : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.elemCounts, srcArray ? &srcArray->elemCounts : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.cpuElemCounts, srcArray ? &srcArray->cpuElemCounts : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.gpuElemCounts, srcArray ? &srcArray->gpuElemCounts : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.gpuPtrs, srcArray ? &srcArray->gpuPtrs : nullptr);
// For array-valued attributes, initialize CPU and GPU element counts
reinterpret_cast<size_t*>(array.elemCounts.cpuData())[array.elemCounts.count - 1] = 0;
array.elemCounts.cpuValid = true;
reinterpret_cast<size_t*>(array.cpuElemCounts.cpuData())[array.cpuElemCounts.count - 1] = 0;
array.cpuElemCounts.cpuValid = true;
reinterpret_cast<size_t*>(array.gpuElemCounts.cpuData())[array.gpuElemCounts.count - 1] = 0;
array.gpuElemCounts.cpuValid = true;
});
addElementToTrackers(element, destBucketImpl);
return element;
}
// Array resize that does not preserve previous data
inline void PathToAttributesMap::destructiveResizeIfNecessary(uint8_t*& cpuData,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount)
{
    // Resize iff (capacity != desiredElemCount)
    if (capacity != desiredElemCount)
{
size_t byteCount = desiredElemCount * elemByteCount;
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
free(cpuData);
cpuData = reinterpret_cast<uint8_t*>(malloc(byteCount));
}
else if (platform.gpuCuda)
{
// Use page-locked memory CPU for CUDA
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuData);
platform.gpuCuda->hostAlloc(*platform.gpuCudaCtx, (void**)&cpuData, byteCount);
}
capacity = desiredElemCount;
}
}
// Flatcache only stores POD types, with the following exceptions:
// eToken (pxr::TfToken)
// eAsset (std::array<pxr::TfToken, 2>)
// eConnection (flatcache::Connection)
//
// The following code constructs an array of objects of one of these types,
// filling memory[newCpuData + oldByteCount .. newCpuData + newByteCount)
// It is called when enlarging arrays of such types
template <typename T>
void constructInPlace(uint8_t* newCpuData, size_t oldByteCount, size_t newByteCount)
{
T* begin = reinterpret_cast<T*>(newCpuData + oldByteCount);
T* end = reinterpret_cast<T*>(newCpuData + newByteCount);
for (T* current = begin; current != end; current++)
{
new (current) T;
}
}
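// For example, when resizeIfNecessary() below grows an array of token
// elements, the newly added tail must hold properly constructed TfTokens
// rather than raw bytes:
//
//     constructInPlace<pxr::TfToken>(newCpuData, oldByteCount, newByteCount);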
// We plan to move TfToken and AssetPath construction to IToken.
// Until we do we have to declare this here and depend on USD headers.
struct AssetPath
{
pxr::TfToken assetPath;
pxr::TfToken resolvedPath;
};
inline bool PathToAttributesMap::__validateArrayInvariants() const
{
    bool encounteredFailure = false;
    // Loop over the buckets, skipping empty ones
    buckets.forEachValidBucket([&encounteredFailure](const BucketId, const BucketImpl& bucketImpl) {
        if (bucketImpl.elemToPath.size() == 0)
            return;
        // Loop over all the array-valued attributes
        bucketImpl.arrayAttributeArrays.forEach([&encounteredFailure](const AttrName& name, const ArrayAttributeArray& local_array) {
            const Typeinfo& typeInfo = local_array.values.typeinfo;
            const size_t elemSize = typeInfo.size;
            // Only validate the actual data, not suffixed variants
            if (name.suffix != NameSuffix::none)
                return;
            // Look up array info
            const MirroredArray* arraySizeArray = &local_array.elemCounts;
            const MirroredArray* arrayCpuCapacityArray = &local_array.cpuElemCounts;
            // Skip tags, which have no per-element data
            if (elemSize != 0)
            {
                // Number of elements in the bucket
                const size_t elemCount = local_array.values.count;
                // Pointers to each element's array data
                const uint8_t* const* elemToArrayCpuData = reinterpret_cast<const uint8_t* const*>(local_array.values.cpuData());
                for (size_t elem = 0; elem != elemCount; elem++)
                {
                    // Get the actual pointer
                    const uint8_t* cpuData = elemToArrayCpuData[elem];
                    // Look up the requested size and the allocated CPU capacity
                    const size_t& requestedSize = reinterpret_cast<const size_t*>(arraySizeArray->cpuData())[elem];
                    const size_t& cpuCapacity = reinterpret_cast<const size_t*>(arrayCpuCapacityArray->cpuData())[elem];
                    if (requestedSize == cpuCapacity)
                    {
                        // (I1) A non-empty array whose sizes match must have a non-null CPU pointer
                        if (cpuCapacity != 0 && !cpuData)
                        {
                            std::cout << "Invalid array name = " << toTfToken(name.name).GetString() << std::endl;
                            encounteredFailure = true;
                        }
                    }
                }
            }
        });
    });
    return encounteredFailure;
}
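// Note the polarity: the function returns true when a violation was found,
// so a debug check from inside the class would read (sketch):
//
//     CARB_ASSERT(!__validateArrayInvariants());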
// Array resize that preserves previous data
inline void PathToAttributesMap::resizeIfNecessary(
uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, TypeC typeC)
{
// TODO: reduce number of reallocations by allocating capacity larger than size
// and not always reallocating when desiredElemCount<capacity
if (capacity < desiredElemCount)
{
size_t oldByteCount = capacity * elemByteCount;
size_t newByteCount = desiredElemCount * elemByteCount;
uint8_t* newCpuData = nullptr;
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
newCpuData = reinterpret_cast<uint8_t*>(malloc(newByteCount));
}
else if (platform.gpuCuda)
{
platform.gpuCuda->hostAlloc(*platform.gpuCudaCtx, reinterpret_cast<void**>(&newCpuData), newByteCount);
}
if (cpuData)
{
size_t copyByteCount = std::min(oldByteCount, newByteCount);
memcpy(newCpuData, cpuData, copyByteCount);
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
free(cpuData);
}
else if (platform.gpuCuda)
{
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuData);
}
}
// If type has a constructor, construct any new elements
if (oldByteCount < newByteCount)
{
Type type(typeC);
const uint8_t kScalar = 1;
const uint8_t kArray = 1;
if (type == Type(BaseDataType::eToken, kScalar, kArray))
{
constructInPlace<pxr::TfToken>(newCpuData, oldByteCount, newByteCount);
}
else if (type == Type(BaseDataType::eAsset, kScalar, kArray))
{
constructInPlace<flatcache::AssetPath>(newCpuData, oldByteCount, newByteCount);
}
else if (type == Type(BaseDataType::eConnection, kScalar, kArray))
{
constructInPlace<flatcache::Connection>(newCpuData, oldByteCount, newByteCount);
}
}
cpuData = newCpuData;
capacity = desiredElemCount;
}
}
inline void PathToAttributesMap::enableCpuReadImpl(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* gpuArrayDataArray,
bool printWarnings)
{
using omni::gpucompute::MemcpyKind;
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& usingCuda = array.gpuAllocedWithCuda;
uint8_t* cpuArray = array.cpuData();
uint8_t*& gpuArray = array.gpuArray;
// If CPU copy is valid, nothing to do
// If GPU copy is valid, copy to CPU
// If USD copy is valid, copy to CPU
if (cpuValid)
{
// Nothing to do
}
else if (!cpuValid && gpuValid)
{
size_t byteCount = array.size();
// Select which API to use
omni::gpucompute::GpuCompute* computeAPI = nullptr;
omni::gpucompute::Context* computeCtx = nullptr;
if (usingCuda)
{
computeAPI = platform.gpuCuda;
computeCtx = platform.gpuCudaCtx;
}
else if (!usingCuda)
{
computeAPI = platform.gpuD3dVk;
computeCtx = platform.gpuD3dVkCtx;
}
const Typeinfo &typeinfo = array.typeinfo;
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (typeinfo.isArray)
{
size_t elemCount = array.count;
uint8_t** elemToArrayCpuData = reinterpret_cast<uint8_t**>(cpuArray);
uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuArrayDataArray->cpuData());
for (size_t elem = 0; elem != elemCount; elem++)
{
// Make sure that the dest (CPU) buffer is large enough
uint8_t*& cpuData = elemToArrayCpuData[elem]; // dest
const uint8_t* const& gpuData = elemToArrayGpuData[elem]; // src
size_t& destCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
destructiveResizeIfNecessary(cpuData, destCapacity, desiredElemCount, typeinfo.arrayElemSize);
// Copy from GPU to CPU
size_t copyByteCount = desiredElemCount * typeinfo.arrayElemSize;
if(gpuData)
computeAPI->memcpy(*computeCtx, cpuData, gpuData, copyByteCount, MemcpyKind::deviceToHost);
}
// Don't copy the outer array to CPU, because GPU is not allowed to change outer array
}
else
{
log("array values: from GPU\n");
computeAPI->memcpy(*computeCtx, cpuArray, gpuArray, byteCount, MemcpyKind::deviceToHost);
}
cpuValid = true;
}
else if (!cpuValid && usdValid)
{
// printf("TODO: read data lazily from USD\n");
}
else
{
if (printWarnings)
CARB_LOG_WARN("No source has valid data array=%p usdValid=%i cpuValid=%i gpuValid=%i gpuAllocedWithCuda=%i", &array, array.usdValid, array.cpuValid, array.gpuValid, array.gpuAllocedWithCuda);
}
}
inline void PathToAttributesMap::enableCpuReadIfValid(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* gpuArrayDataArray)
{
enableCpuReadImpl(array, elemToArraySize, elemToArrayCpuCapacity, elemToArrayGpuCapacity, gpuArrayDataArray, false);
}
inline void PathToAttributesMap::enableCpuRead(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* gpuArrayDataArray)
{
enableCpuReadImpl(array, elemToArraySize, elemToArrayCpuCapacity, elemToArrayGpuCapacity, gpuArrayDataArray, true);
}
inline void PathToAttributesMap::enableCpuWrite(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuData)
{
using omni::gpucompute::MemcpyKind;
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
const Typeinfo &typeinfo = array.typeinfo;
// Array-valued elements are lazily allocated, meaning they are only
// resized when write access is requested.
// Write access has been requested, so resize if necessary
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (typeinfo.isArray)
{
size_t elemCount = array.count;
uint8_t** elemToArrayCpuData = reinterpret_cast<uint8_t**>(array.cpuData());
CARB_ASSERT(elemToArrayCpuCapacity->cpuValid);
for (size_t elem = 0; elem != elemCount; elem++)
{
uint8_t*& cpuData = elemToArrayCpuData[elem];
size_t& cpuCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
resizeIfNecessary(cpuData, cpuCapacity, desiredElemCount, typeinfo.arrayElemSize, array.type);
}
}
// New state
usdValid = false;
cpuValid = true;
gpuValid = false;
}
inline ArrayAndDirtyIndices PathToAttributesMap::getArraySpanC(MirroredArray& array,
const AttrName& name,
const ArrayOfArrayInfo& aoa,
BucketImpl& bucketImpl,
const IOConfig& io)
{
const size_t elemCount = bucketImpl.elemToPath.size();
const Typeinfo& typeinfo = array.typeinfo;
const size_t elemSize = typeinfo.size;
log("begin getArrayC\n");
bool isTag = (elemSize == 0);
if (isTag)
{
        // If it is a tag, then array.data() will be zero, so set a special
        // value to distinguish it from the tag-absent case
return { SpanC{ (uint8_t*)-1, elemCount, 0 }, {} };
}
// Read enable must come before write enable
if (io.enableRead)
{
(this->*io.enableRead)(array, aoa.arraySizeArray, aoa.arrayCpuCapacityArray, aoa.arrayGpuCapacityArray, aoa.arrayGpuPtrArray);
// If requesting GPU access to array-of-array, additionally
// enable array of GPU pointers for GPU read
if (typeinfo.isArray && io.device == Device::eCudaGPU)
{
(this->*io.enableRead)(*aoa.arrayGpuPtrArray, nullptr, nullptr, nullptr, nullptr);
}
}
if (io.enableWrite)
{
(this->*io.enableWrite)(array, aoa.arraySizeArray, aoa.arrayCpuCapacityArray, aoa.arrayGpuCapacityArray, aoa.arrayGpuPtrArray);
// If requesting GPU access to array-of-array, additionally
// enable array of GPU pointers for GPU _read_
// This is necessary because the pointers may have been
// reallocated on CPU, and the GPU needs to _read_ these new
// pointers
if (typeinfo.isArray && io.device == Device::eCudaGPU)
{
(this->*io.enableRdPtrForWrite)(*aoa.arrayGpuPtrArray, nullptr, nullptr, nullptr, nullptr);
}
}
// If CPU pointer requested
// return CPU pointer
// If GPU pointer requested and not array of array
// return GPU pointer
// If GPU pointer requested and array of array
// return GPU pointer to GPU pointer array
uint8_t* retPtr = nullptr;
if (io.device == Device::eCPU)
{
retPtr = array.cpuData();
}
else if (io.device == Device::eCudaGPU && !typeinfo.isArray)
{
retPtr = array.gpuArray;
}
else if (io.device == Device::eCudaGPU && typeinfo.isArray && io.ptrToPtrKind == PtrToPtrKind::eGpuPtrToGpuPtr)
{
retPtr = aoa.arrayGpuPtrArray->gpuArray;
}
else if (io.device == Device::eCudaGPU && typeinfo.isArray && io.ptrToPtrKind == PtrToPtrKind::eCpuPtrToGpuPtr)
{
retPtr = aoa.arrayGpuPtrArray->cpuData();
}
else if (io.device == Device::eD3dVkGPU && !typeinfo.isArray)
{
retPtr = array.gpuArray;
}
else if (io.device == Device::eD3dVkGPU && typeinfo.isArray)
{
retPtr = aoa.arrayGpuPtrArray->cpuData();
}
// If enabling write,
// for each enabled listener listening to this attribute
// if changedIndices exists
// add to vector
// else
// create changedIndices and add to vector
// return vector
// else
// return empty vector
std::vector<ChangedIndicesImpl*> changedIndicesForEachListener;
changedIndicesForEachListener.reserve(listenerIdToChangeTrackerConfig.size());
if (io.enableWrite)
{
        // Optimization: constructing attrNameAndType is non-trivial, but it
        // is loop-invariant, so we only construct it once.
bool costlyInvariantsInitialized = false;
AttrNameAndType *const attrNameAndType = (AttrNameAndType*)alloca(sizeof(AttrNameAndType)); // stack-allocate here for scope, but lazily-initialize below
listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &costlyInvariantsInitialized, &name, &attrNameAndType, &array, &elemCount, &changedIndicesForEachListener](ListenerId& listenerId, ChangeTrackerConfig& config) {
// Create listener if it doesn't exist in bucket
Changes* changes;
if (bucketImpl.listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes();
}
if (config.changeTrackingEnabled && config.attrNamesToLog.contains(name.name))
{
if (!costlyInvariantsInitialized)
{
new (attrNameAndType) AttrNameAndType(Type(array.type), name.name, name.suffix);
costlyInvariantsInitialized = true;
}
auto iter = changes->changedAttributes.find(*attrNameAndType);
bool foundChangedIndices = (iter != changes->changedAttributes.end());
if (!foundChangedIndices)
{
// TODO: move this into a new ordered_map class
auto& keys = changes->changedAttributes.v;
auto& values = changes->changedIndices;
auto insertIter = lower_bound(keys.begin(), keys.end(), *attrNameAndType);
ptrdiff_t insertIndex = insertIter - keys.begin();
keys.insert(insertIter, *attrNameAndType);
values.insert(values.begin() + insertIndex, ChangedIndicesImpl(elemCount));
changedIndicesForEachListener.push_back(&values[insertIndex]);
}
else
{
ptrdiff_t attrIndex = iter - changes->changedAttributes.begin();
changedIndicesForEachListener.push_back(&changes->changedIndices[attrIndex]);
}
}
});
}
return { SpanC{ retPtr, elemCount, typeinfo.size }, changedIndicesForEachListener };
}
inline ArrayAndDirtyIndices PathToAttributesMap::getArraySpanC(BucketId bucketId,
TokenC attrName,
const IOConfig &io,
NameSuffix suffix)
{
BucketImpl *const bucketImpl = buckets.find(bucketId);
if (!bucketImpl)
{
return { SpanC{ nullptr, 0, 0 }, {} };
}
const AttrName name{ attrName, suffix };
{
ScalarAttributeArray *array;
if (bucketImpl->scalarAttributeArrays.find(name, &array))
{
const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo();
return getArraySpanC(*array, name, aoa, *bucketImpl, io);
}
}
{
ArrayAttributeArray *array;
if (bucketImpl->arrayAttributeArrays.find(name, &array))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*array);
return getArraySpanC(array->values, name, aoa, *bucketImpl, io);
}
}
return { SpanC{ nullptr, 0, 0 }, {} };
}
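// Illustrative usage (a sketch, not part of the tested API surface): fetching a
// read-only CPU view of one attribute column for a bucket. `pam`, `bucketId`
// and `attr` are hypothetical placeholder names.
//
//   ArrayAndDirtyIndices view = pam.getArraySpanC(bucketId, attr, CpuReadConfig(), NameSuffix::none);
//   if (view.array.ptr)
//   {
//       // view.array.elementCount elements of view.array.elementSize bytes each
//   }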
template <typename T>
inline const T* PathToAttributesMap::getArrayRd(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayRd", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getArrayRdC(bucket, attrName, NameSuffix::none));
}
template <typename T>
inline const T* PathToAttributesMap::getArrayRd(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayRd", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getArrayRdC(bucketId, attrName, NameSuffix::none).ptr);
}
template <typename T>
inline T* PathToAttributesMap::getArrayWr(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayWr", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayWrC(bucket, attrName, NameSuffix::none));
}
template <typename T>
inline T* PathToAttributesMap::getArray(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArray", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayC(bucket, attrName, NameSuffix::none));
}
template <typename T>
inline std::vector<const T*> PathToAttributesMap::getArraysRd(const std::vector<Bucket>& buckets, const TokenC& attrName)
{
size_t bucketCount = buckets.size();
std::vector<const T*> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getArrayRd<T>(buckets[i], attrName);
}
return retval;
}
template <typename T>
inline std::vector<T*> PathToAttributesMap::getArraysWr(const std::vector<Bucket>& buckets, const TokenC& attrName)
{
size_t bucketCount = buckets.size();
std::vector<T*> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getArrayWr<T>(buckets[i], attrName);
}
return retval;
}
template <typename T>
inline std::vector<T*> PathToAttributesMap::getArrays(const std::vector<Bucket>& buckets, const TokenC& attrName)
{
size_t bucketCount = buckets.size();
std::vector<T*> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getArray<T>(buckets[i], attrName);
}
return retval;
}
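// Illustrative usage: reading one attribute across several buckets at once.
// The names below (`pam`, `bucketList`, `radiusAttr`) are hypothetical, and the
// float element type is an assumption for the example.
//
//   std::vector<const float*> columns = pam.getArraysRd<float>(bucketList, radiusAttr);
//   for (const float* column : columns)
//   {
//       // column points at that bucket's contiguous CPU data, or is nullptr
//   }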
inline BucketId PathToAttributesMap::findBucketId(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool found = iter != attrNameSetToBucketId.end();
if (!found)
return { kInvalidBucketId };
return iter->second;
}
inline ConstSpanC PathToAttributesMap::getArraySpanRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanRdC", apiLogEnabled, attrName);
// Get read-only CPU access
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array;
}
inline const void* PathToAttributesMap::getArrayRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdC", apiLogEnabled, attrName);
// Get read-only CPU access
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array.ptr;
}
inline ConstSpanC PathToAttributesMap::getArrayRdC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdC", apiLogEnabled, attrName);
// Get read-only CPU access
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getArraySpanWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanWrC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline void* PathToAttributesMap::getArrayWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array.ptr;
}
inline SpanC PathToAttributesMap::getArrayWrC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getOrCreateArrayWrC(
BucketId bucketId, const TokenC& attrName, TypeC type, NameSuffix suffix)
{
APILOGGER("getOrCreateArrayWrC", apiLogEnabled, attrName);
auto bucketImpl = buckets.find(bucketId);
if (!bucketImpl)
{
return SpanC{ nullptr, 0, 0 };
}
const AttrName name{ attrName, suffix };
ArrayOfArrayInfo aoa;
MirroredArray* array = nullptr;
const Typeinfo& typeinfo = getTypeInfo(type);
if (typeinfo.isArray)
{
ArrayAttributeArray *arrayAttributeArray;
if (!bucketImpl->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
Bucket bucket = getNamesAndTypes(bucketId);
bucketImpl = &addAttributeC(*bucketImpl, bucket, attrName, type);
const bool found = bucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &arrayAttributeArray);
CARB_ASSERT(found);
CARB_UNUSED(found);
array = &arrayAttributeArray->values;
aoa = getArrayOfArrayInfo(*arrayAttributeArray);
}
else
{
array = &arrayAttributeArray->values;
aoa = getArrayOfArrayInfo(*arrayAttributeArray);
}
}
else
{
ScalarAttributeArray *scalarAttributeArray;
if (!bucketImpl->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
Bucket bucket = getNamesAndTypes(bucketId);
bucketImpl = &addAttributeC(*bucketImpl, bucket, attrName, type);
const bool found = bucketImpl->scalarAttributeArrays.find({ attrName, NameSuffix::none }, &scalarAttributeArray);
CARB_ASSERT(found);
CARB_UNUSED(found);
array = scalarAttributeArray;
aoa = ScalarArrayOfArrayInfo();
}
else
{
array = scalarAttributeArray;
aoa = ScalarArrayOfArrayInfo();
}
}
CARB_ASSERT(type == array->type);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*array, name, aoa, *bucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
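// Illustrative usage: write access that adds the attribute to the bucket if it
// is not already present. `pam`, `bucketId`, `attr` and `floatType` are
// hypothetical placeholders.
//
//   SpanC span = pam.getOrCreateArrayWrC(bucketId, attr, floatType, NameSuffix::none);
//   if (span.ptr)
//   {
//       // span covers every element of the bucket for this attribute
//   }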
inline SpanC PathToAttributesMap::getArraySpanC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline void* PathToAttributesMap::getArrayC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array.ptr;
}
inline void PathToAttributesMap::setArrayDirty(ArrayAndDirtyIndices& array)
{
for (ChangedIndicesImpl* listener : array.changedIndicesForEachListener)
{
listener->dirtyAll();
}
}
inline void PathToAttributesMap::setArrayElementDirty(ArrayAndDirtyIndices& array, size_t elemIndex)
{
for (ChangedIndicesImpl* listener : array.changedIndicesForEachListener)
{
listener->insert(elemIndex, array.array.elementCount);
}
}
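// Note on change tracking: the accessors in this file funnel writes through
// setArrayDirty / setArrayElementDirty so that every enabled listener's
// ChangedIndicesImpl gets updated. A rough sketch of the contract:
//
//   ArrayAndDirtyIndices a = getArraySpanC(bucketId, attr, CpuWriteConfig(), suffix);
//   setArrayDirty(a);           // whole-array write: mark all elements changed
//   setArrayElementDirty(a, i); // single-element write: mark only element i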
inline SpanC PathToAttributesMap::getArrayC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline const PathC* PathToAttributesMap::getPathArray(const Bucket& bucket) const
{
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (found)
{
BucketId bucketId = iter->second;
const auto implPtr = buckets.find(bucketId);
if (implPtr)
{
return reinterpret_cast<const PathC*>(implPtr->elemToPath.data());
}
else
{
CARB_LOG_WARN_ONCE("Found in attrNameSetToBucketId, but didn't find BucketId %zu in buckets\n", size_t(bucketId));
}
}
CARB_LOG_ERROR("getPathArray: Bucket not found");
printBucket(bucket);
std::cout << "\n";
CARB_LOG_INFO("Bucket list:");
printBucketNamesAndTypes();
return nullptr;
}
inline ConstPathCSpan PathToAttributesMap::getPathArray(BucketId bucketId) const
{
const auto implPtr = buckets.find(bucketId);
if (implPtr)
{
const BucketImpl& bucketImpl = *implPtr;
return { reinterpret_cast<const Path*>(bucketImpl.elemToPath.data()), bucketImpl.elemToPath.size() };
}
else
{
CARB_LOG_WARN_ONCE("Found in attrNameSetToBucketId, but didn't find BucketId %zu in buckets\n", size_t(bucketId));
}
return { nullptr, 0 };
}
inline Bucket PathToAttributesMap::getNamesAndTypes(BucketId bucketId) const
{
auto implPtr = buckets.find(bucketId);
if (implPtr)
{
const BucketImpl& bucketImpl = *implPtr;
size_t maxCount = bucketImpl.scalarAttributeArrays.size();
set<flatcache::AttrNameAndType> bucket;
bucket.reserve(maxCount);
bucketImpl.scalarAttributeArrays.forEach([&bucket](const AttrName& name, const ScalarAttributeArray& array) {
const TypeC& type = array.type;
if (name.suffix == NameSuffix::none || name.suffix == NameSuffix::connection)
{
AttrNameAndType attrNameAndType;
attrNameAndType.type = carb::flatcache::Type(type);
attrNameAndType.name = name.name;
attrNameAndType.suffix = name.suffix;
bucket.insert(attrNameAndType);
}
});
bucketImpl.arrayAttributeArrays.forEach([&bucket](const AttrName& name, const ArrayAttributeArray& array) {
const TypeC& type = array.values.type;
if (name.suffix == NameSuffix::none || name.suffix == NameSuffix::connection)
{
AttrNameAndType attrNameAndType;
attrNameAndType.type = Type(type);
attrNameAndType.name = name.name;
attrNameAndType.suffix = name.suffix;
bucket.insert(attrNameAndType);
}
});
return bucket;
}
else
{
CARB_LOG_ERROR_ONCE("getNamesAndTypes, bucketId %zu not found\n", size_t(bucketId));
return set<AttrNameAndType>();
}
}
inline void PathToAttributesMap::checkInvariants()
{
for (auto& bucketAndId : attrNameSetToBucketId)
{
const Bucket& correctBucket = bucketAndId.first;
BucketId bucketId = bucketAndId.second;
Bucket candidateBucket = getNamesAndTypes(bucketId);
if (candidateBucket.size() != correctBucket.size())
{
CARB_BREAK_POINT();
}
for (size_t i = 0; i != candidateBucket.size(); i++)
{
const AttrNameAndType& candidateNameAndType = candidateBucket.v[i];
const AttrNameAndType& correctNameAndType = correctBucket.v[i];
if (!(candidateNameAndType == correctNameAndType))
{
std::stringstream ss;
ss << "Candidate: " << Type(candidateNameAndType.type) << " "
<< Token(candidateNameAndType.name).getText() << toString(candidateNameAndType.suffix) << " "
<< " Correct: " << Type(correctNameAndType.type)
<< " " << Token(correctNameAndType.name).getText() << toString(correctNameAndType.suffix) << " "
<< "\n";
CARB_LOG_ERROR("%s", ss.str().c_str());
CARB_BREAK_POINT();
}
}
}
}
inline std::pair<bool, std::vector<AttrNameAndType>::const_iterator> findAttrNameAndType(const Bucket& bucket,
const TokenC& attrName)
{
#if 0
// Do O(log n) search of bucket for attrName, ignoring type
auto cmp = [](const AttrNameAndType& a, const TokenC& b) {
return a.name < b;
};
auto i = lower_bound(bucket.begin(), bucket.end(), attrName, cmp);
// There can be multiple elements with the same attrName, so check them all
while (i != bucket.end() && i->name == attrName && i->suffix != NameSuffix::none)
i++;
// At this point i is either at the end, past the elements with attrName, or pointing to an element with
// suffix==none.
// If we didn't reach the end and didn't pass the elements with attrName, then i must point to
// attrName with suffix==none
bool found = (i != bucket.end() && i->name == attrName);
return make_pair(found, i);
#else
// Until we fix the order of the fields in the tuple to make equal attrNames contiguous, do a linear search
auto i = bucket.begin();
while (i != bucket.end() && !(i->name == attrName && i->suffix == NameSuffix::none))
i++;
bool found = (i != bucket.end());
return make_pair(found, i);
#endif
}
inline TypeC PathToAttributesMap::getType(const Bucket& bucket, const TokenC& attrName) const
{
APILOGGER("getType", apiLogEnabled, attrName);
std::vector<AttrNameAndType>::const_iterator pAttrNameAndType;
bool found;
std::tie(found, pAttrNameAndType) = findAttrNameAndType(bucket, attrName);
return found ? TypeC(pAttrNameAndType->type) : TypeC();
}
inline void PathToAttributesMap::addPath(const PathC& path, const Bucket& destBucket)
{
std::pair<BucketId, ArrayIndex> *pathAndBucketElem;
if (pathToBucketElem.allocateEntry(path, &pathAndBucketElem))
{
auto bucketIdAndImpl = findOrCreateBucket(destBucket);
BucketId bucketId = bucketIdAndImpl.first;
BucketImpl& bucketImpl = bucketIdAndImpl.second;
bucketImpl.SetBucket(destBucket);
size_t endElement = allocElement(bucketImpl);
*pathAndBucketElem = std::make_pair(bucketId, endElement);
bucketImpl.elemToPath[endElement] = { toSdfPath(path) };
}
else
{
auto iter = attrNameSetToBucketId.find(destBucket);
bool destBucketExists = (iter != attrNameSetToBucketId.end());
BucketId destBucketId = destBucketExists ? iter->second : kInvalidBucketId;
BucketId currentBucketId = pathAndBucketElem->first;
bool destBucketSpecified = (destBucket.size() != 0);
if (!destBucketSpecified || (destBucketExists && destBucketId == currentBucketId))
{
// If the dest bucket is not specified, or if already in the right
// bucket, then leave path in current bucket
return;
}
else if (destBucketSpecified && (destBucketId != currentBucketId))
{
moveElementBetweenBuckets(path, destBucketId, currentBucketId, destBucket);
}
}
}
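// Illustrative usage: placing a path into the bucket keyed by its attribute
// set; an existing path is moved only when `destBucket` names a different
// bucket. `pam`, `primPath` and `worldMatrixNameAndType` are hypothetical.
//
//   Bucket destBucket;
//   destBucket.insert(worldMatrixNameAndType);
//   pam.addPath(primPath, destBucket);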
// renames a path in a bucket
inline void PathToAttributesMap::renamePath(const PathC& oldPath, const PathC& newPath)
{
// TODO: should this early exit if oldPath == newPath?
std::pair<BucketId, ArrayIndex> *oldPathAndBucketElem;
if (pathToBucketElem.find(oldPath, &oldPathAndBucketElem))
{
BucketImpl* bucketImplPtr = buckets.find(oldPathAndBucketElem->first);
bucketImplPtr->elemToPath[oldPathAndBucketElem->second] = toSdfPath(newPath);
std::pair<BucketId, ArrayIndex> *newPathAndBucketElem;
pathToBucketElem.allocateEntry(newPath, &newPathAndBucketElem);
*newPathAndBucketElem = std::move(*oldPathAndBucketElem);
pathToBucketElem.freeEntry(oldPath);
}
else
{
CARB_LOG_WARN_ONCE("PathToAttributesMap::renamePath(%s,%s) - cannot find bucket to rename\n",
Path(oldPath).getText(), Path(newPath).getText());
return;
}
}
// present - Whether this path has a bucket
// bucket  - Id of the bucket if it does
// element - Index corresponding to path in this bucket's arrays
inline std::tuple<bool, BucketId, size_t> PathToAttributesMap::getPresentAndBucketAndElement(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketElem;
if (!pathToBucketElem.find(path, &bucketElem))
{
return { false, kInvalidBucketId, 0 };
}
return { true, bucketElem->first, bucketElem->second };
}
inline BucketId PathToAttributesMap::getBucketId(const PathC& path) const
{
std::tuple<bool, flatcache::BucketId, size_t> presentAndBucketAndElement = getPresentAndBucketAndElement(path);
bool present = std::get<0>(presentAndBucketAndElement);
if (!present)
return flatcache::kInvalidBucketId;
return std::get<1>(presentAndBucketAndElement);
}
inline SpanC PathToAttributesMap::getArrayElementPtr(SpanC array, size_t bucketElement) const
{
if (array.ptr == nullptr)
return { nullptr, 0, 0 };
size_t elemSize = array.elementSize;
return { array.ptr + bucketElement * elemSize, 1, elemSize };
}
inline ConstSpanC PathToAttributesMap::getArrayElementPtr(ConstSpanC array, size_t bucketElement) const
{
if (array.ptr == nullptr)
return { nullptr, 0, 0 };
size_t elemSize = array.elementSize;
return { array.ptr + bucketElement * elemSize, 1, elemSize };
}
inline SpanC PathToAttributesMap::getAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeC", apiLogEnabled, attrName);
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayElementDirty(arrayAndchangedIndices, element);
SpanC array = arrayAndchangedIndices.array;
return getArrayElementPtr(array, element);
}
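// Illustrative usage: per-path element access. Unlike the bucket-level
// accessors above, this returns a span of exactly one element and marks only
// that element dirty. `pam`, `primPath` and `attr` are hypothetical.
//
//   SpanC elem = pam.getAttributeC(primPath, attr, NameSuffix::none);
//   if (elem.ptr)
//   {
//       // elem.elementCount == 1; elem.ptr points at this path's value
//   }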
inline ConstSpanC PathToAttributesMap::getAttributeRdC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeRdC", apiLogEnabled, attrName);
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
const ConstSpanC array = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix).array;
// We don't set dirty indices here because this method gives read-only access
return getArrayElementPtr(array, element);
}
inline SpanC PathToAttributesMap::getAttributeWrC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeWrC", apiLogEnabled, path, attrName);
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
// Writing an element is a RMW on the whole array, so get read/write CPU access
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadIfValidWriteConfig(), suffix);
setArrayElementDirty(arrayAndchangedIndices, element);
SpanC array = arrayAndchangedIndices.array;
return getArrayElementPtr(array, element);
}
inline SpanC PathToAttributesMap::setArrayAttributeSizeAndGet(PathC path, const TokenC& attrName, size_t newSize)
{
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t elementIndex; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, elementIndex) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
return setArrayAttributeSizeAndGet(bucketId, elementIndex, attrName, newSize);
}
inline SpanC PathToAttributesMap::setArrayAttributeSizeAndGet(
BucketId bucketId, size_t elementIndex, const TokenC& attrName, size_t newSize)
{
APILOGGER("setArrayAttributeSizeAndGet", apiLogEnabled, attrName);
// TODO: remove double hash lookup here
ArrayAndDirtyIndices sizeArray;
{
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
sizeArray = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
}
else
{
sizeArray = { SpanC{ nullptr, 0, 0 }, {} };
}
}
if (sizeArray.array.elementCount <= elementIndex)
return { nullptr, 0, 0 };
// Set the size
size_t* sizePtr = reinterpret_cast<size_t*>(getArrayElementPtr(sizeArray.array, elementIndex).ptr);
if (!sizePtr)
return { nullptr, 0, 0 };
*sizePtr = newSize;
// TODO: does this need to be moved higher next to getArraySpanC above?
setArrayElementDirty(sizeArray, elementIndex);
// Get the new array-valued element
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), NameSuffix::none);
setArrayElementDirty(arrayAndchangedIndices, elementIndex);
SpanC array = arrayAndchangedIndices.array;
uint8_t** arrayData = reinterpret_cast<uint8_t**>(getArrayElementPtr(array, elementIndex).ptr);
if (!arrayData)
return { nullptr, 0, 0 };
return { *arrayData, newSize, 0 };
}
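// Illustrative usage: resizing an array-valued attribute for one path and then
// filling it. `pam`, `primPath` and `pointsAttr` are hypothetical, and the
// float element type is an assumption for the example.
//
//   SpanC data = pam.setArrayAttributeSizeAndGet(primPath, pointsAttr, 3);
//   if (data.ptr)
//   {
//       float* values = reinterpret_cast<float*>(data.ptr);
//       values[0] = values[1] = values[2] = 0.0f; // data.elementCount == 3
//   }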
template <typename T>
T* PathToAttributesMap::getAttribute(const PathC& path, const TokenC& attrName)
{
APILOGGER("getAttribute", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeC(path, attrName, NameSuffix::none).ptr);
}
template <typename T>
const T* PathToAttributesMap::getAttributeRd(const PathC& path, const TokenC& attrName)
{
APILOGGER("getAttributeRd", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getAttributeRdC(path, attrName, NameSuffix::none).ptr);
}
template <typename T>
T* PathToAttributesMap::getAttributeWr(const PathC& path, const TokenC& attrName)
{
APILOGGER("getAttributeWr", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeWrC(path, attrName, NameSuffix::none).ptr);
}
template <typename T>
T* PathToAttributesMap::getAttributeWr(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeWr", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeWrC(path, attrName, suffix).ptr);
}
inline ValidMirrors PathToAttributesMap::getAttributeValidBits(const PathC& path,
const TokenC& attrName,
ArrayAttributeArray::MirroredArrays subArray) const
{
APILOGGER("getAttributeValidBits", apiLogEnabled, path, attrName);
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return ValidMirrors::eNone;
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return ValidMirrors::eNone;
const MirroredArray *array = nullptr;
const ScalarAttributeArray *scalarAttributeArray;
const AttrName name{ attrName, NameSuffix::none };
if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
array = scalarAttributeArray;
}
else
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
// This if statement is only needed because we ported OM-70434 fix from 105 to 104.2
// In 105, these mirrored arrays are stored in an array, indexed by subarray
if (subArray == ArrayAttributeArray::MirroredArrays::Values)
{
array = &arrayAttributeArray->values;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::ElemCounts)
{
array = &arrayAttributeArray->elemCounts;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::CpuElemCounts)
{
array = &arrayAttributeArray->cpuElemCounts;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::GpuElemCounts)
{
array = &arrayAttributeArray->gpuElemCounts;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::GpuPtrs)
{
array = &arrayAttributeArray->gpuPtrs;
}
}
else
{
return ValidMirrors::eNone;
}
}
const size_t elemSize = array->typeinfo.size;
const bool isTag = (elemSize == 0);
if (isTag)
return ValidMirrors::eNone;
ValidMirrors retval = ValidMirrors::eNone;
if (array->cpuValid)
retval = retval | ValidMirrors::eCPU;
if (array->gpuValid && array->gpuAllocedWithCuda)
retval = retval | ValidMirrors::eCudaGPU;
if (array->gpuValid && !array->gpuAllocedWithCuda)
retval = retval | ValidMirrors::eGfxGPU;
return retval;
}
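// Illustrative usage: checking which mirrors currently hold valid data before
// choosing a device to read from. `pam`, `primPath` and `attr` are
// hypothetical, and the flag test assumes the usual bitwise operators are
// defined for ValidMirrors.
//
//   ValidMirrors mirrors = pam.getAttributeValidBits(
//       primPath, attr, ArrayAttributeArray::MirroredArrays::Values);
//   if ((mirrors & ValidMirrors::eCudaGPU) != ValidMirrors::eNone)
//   {
//       // reading the CUDA mirror will not trigger a CPU->GPU copy
//   }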
inline bool PathToAttributesMap::findArrayAttributeArrayForPath(const PathC& path, const TokenC& attrName, size_t& outElementIndex, BucketImpl*& outBucketImpl, ArrayAttributeArray*& outArrayAttributeArray)
{
BucketId bucketId;
bool found;
std::tie(found, bucketId, outElementIndex) = getPresentAndBucketAndElement(path);
if (found)
{
outBucketImpl = buckets.find(bucketId);
if (outBucketImpl)
{
ArrayAttributeArray* array;
if (outBucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &array))
{
outArrayAttributeArray = array;
return true;
}
}
}
CARB_LOG_WARN_ONCE("Warning: %s not found\n", toTfToken(attrName).GetText());
return false;
}
inline bool PathToAttributesMap::findArrayAttributeArrayForBucketId(const BucketId bucketId, const TokenC& attrName, BucketImpl*& outBucketImpl, ArrayAttributeArray*& outArrayAttributeArray)
{
if (bucketId != kInvalidBucketId)
{
outBucketImpl = buckets.find(bucketId);
if (outBucketImpl)
{
ArrayAttributeArray* array;
if (outBucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &array))
{
outArrayAttributeArray = array;
return true;
}
}
}
CARB_LOG_WARN_ONCE("Warning: %s not found\n", toTfToken(attrName).GetText());
return false;
}
inline size_t* PathToAttributesMap::getArrayAttributeSize(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSize", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayElementDirty(arrayAndchangedIndices, element);
const SpanC array = arrayAndchangedIndices.array;
return reinterpret_cast< size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline const size_t* PathToAttributesMap::getArrayAttributeSizeRd(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizeRd", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<const size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline size_t* PathToAttributesMap::getArrayAttributeSizeWr(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizeWr", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadIfValidWriteConfig());
setArrayElementDirty(arrayAndchangedIndices, element);
const SpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline const size_t* PathToAttributesMap::getArrayAttributeSizeRdGpu(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizeRdGpu", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CudaReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<const size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline const size_t* PathToAttributesMap::getArrayAttributeSizesRdGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRdGpu", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
const BucketId bucketId = findBucketId(bucket);
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CudaReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<const size_t*>(array.ptr);
}
return nullptr;
}
inline ConstSpanSizeC PathToAttributesMap::getArrayAttributeSizesRdGpu(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRdGpu", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CudaReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return { reinterpret_cast<const size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline size_t* PathToAttributesMap::getArrayAttributeSizes(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizes", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
const BucketId bucketId = findBucketId(bucket);
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndchangedIndices);
const SpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<size_t*>(array.ptr);
}
return nullptr;
}
inline SpanSizeC PathToAttributesMap::getArrayAttributeSizes(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizes", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndchangedIndices);
const SpanC array = arrayAndchangedIndices.array;
CARB_ASSERT(array.elementSize == sizeof(size_t));
return { reinterpret_cast<size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline ConstSpanSizeC PathToAttributesMap::getArrayAttributeSizesRd(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRd", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
CARB_ASSERT(array.elementSize == sizeof(size_t));
return { reinterpret_cast<const size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline SpanSizeC PathToAttributesMap::getArrayAttributeSizesWr(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesWr", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndchangedIndices);
const SpanC array = arrayAndchangedIndices.array;
CARB_ASSERT(array.elementSize == sizeof(size_t));
return { reinterpret_cast<size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline ArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizes(BucketId bucketId,
const TokenC& attrName)
{
APILOGGER("getArrayAttributeArrayWithSizes", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
SpanC spanOfPointers;
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
spanOfPointers = arrayAndDirtyIndices.array;
}
ConstSpanC spanOfSizes;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfSizes = arrayAndDirtyIndices.array;
}
CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr), reinterpret_cast<const size_t*>(spanOfSizes.ptr),
spanOfPointers.elementCount };
}
return ArrayPointersAndSizesC{ nullptr, nullptr, 0 };
}
inline ConstArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizesRd(BucketId bucketId,
const TokenC& attrName)
{
APILOGGER("getArrayAttributeArrayWithSizesRd", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ConstSpanC spanOfPointers;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfPointers = arrayAndDirtyIndices.array;
}
ConstSpanC spanOfSizes;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfSizes = arrayAndDirtyIndices.array;
}
CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr), reinterpret_cast<const size_t*>(spanOfSizes.ptr),
spanOfPointers.elementCount };
}
return ConstArrayPointersAndSizesC{ nullptr, nullptr, 0 };
}
inline ArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizesWr(BucketId bucketId,
const TokenC& attrName)
{
APILOGGER("getArrayAttributeArrayWithSizesWr", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
SpanC spanOfPointers;
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
spanOfPointers = arrayAndDirtyIndices.array;
}
ConstSpanC spanOfSizes;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfSizes = arrayAndDirtyIndices.array;
}
CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr), reinterpret_cast<const size_t*>(spanOfSizes.ptr),
spanOfPointers.elementCount };
}
return ArrayPointersAndSizesC{ nullptr, nullptr, 0 };
}
inline const size_t* PathToAttributesMap::getArrayAttributeSizesRd(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRd", apiLogEnabled, attrName);
const BucketId bucketId = findBucketId(bucket);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
return reinterpret_cast<const size_t*>(arrayAndDirtyIndices.array.ptr);
}
return nullptr;
}
inline size_t* PathToAttributesMap::getArrayAttributeSizesWr(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesWr", apiLogEnabled, attrName);
const BucketId bucketId = findBucketId(bucket);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
return reinterpret_cast<size_t*>(arrayAndDirtyIndices.array.ptr);
}
return nullptr;
}
// Intersect set<AttrNameAndType> and set<AttrNameAndType> comparing type,
// name and suffix, and ignoring tfType
inline void set_intersection2(set<AttrNameAndType>::const_iterator first1,
set<AttrNameAndType>::const_iterator last1,
set<AttrNameAndType>::const_iterator first2,
set<AttrNameAndType>::const_iterator last2,
std::back_insert_iterator<std::vector<AttrNameAndType>> d_first)
{
// Note that in the name comparisons below TokenC masks off USD's lifetime bit.
// For example, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
// Both ranges hold AttrNameAndType, so a single comparator ordering by
// (type, name, suffix) serves both sides of the intersection.
auto comp = [](const AttrNameAndType& a, const AttrNameAndType& b) {
if (TypeC(a.type) < TypeC(b.type))
return true;
if (TypeC(b.type) < TypeC(a.type))
return false;
if (TokenC(a.name) < TokenC(b.name))
return true;
if (TokenC(b.name) < TokenC(a.name))
return false;
return a.suffix < b.suffix;
};
while (first1 != last1 && first2 != last2)
{
if (comp(*first1, *first2))
{
++first1;
}
else
{
if (!comp(*first2, *first1))
{
*d_first++ = *first1++;
}
++first2;
}
}
}
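// Semantics sketch: as with std::set_intersection, both input ranges must be
// sorted by the same (type, name, suffix) ordering, and elements of the first
// range that also appear in the second are appended to d_first. For example,
// intersecting {A, B, C} with {B, C, D} yields {B, C}.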
inline flatcache::set<BucketId> PathToAttributesMap::findBuckets(const set<AttrNameAndType>& all,
const set<AttrNameAndType>& any,
const set<AttrNameAndType>& none) const
{
flatcache::set<BucketId> retval;
retval.reserve(256);
// TODO: Do this in a less brute-force way
for (auto& bucketAndId : attrNameSetToBucketId)
{
const Bucket& bucketTypes = bucketAndId.first;
BucketId bucketId = bucketAndId.second;
bool bucketEmpty = getElementCount(bucketId) == 0;
if (bucketEmpty)
continue;
std::vector<AttrNameAndType> allTypesPresent;
std::vector<AttrNameAndType> anyTypesPresent;
std::vector<AttrNameAndType> noneTypesPresent;
set_intersection2(
all.begin(), all.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(allTypesPresent));
set_intersection2(
any.begin(), any.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(anyTypesPresent));
set_intersection2(
none.begin(), none.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(noneTypesPresent));
bool allOfAllTypesPresent = (allTypesPresent.size() == all.size());
bool oneOfAnyTypesPresent = (any.size() == 0) || (anyTypesPresent.size() != 0);
bool noneOfNoneTypesPresent = (noneTypesPresent.size() == 0);
if (allOfAllTypesPresent && oneOfAnyTypesPresent && noneOfNoneTypesPresent)
{
retval.v.push_back(bucketId);
}
}
// Sort the vector to make it a flatcache::set
std::sort(retval.begin(), retval.end());
return retval;
}
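// Illustrative usage: finding every non-empty bucket that contains all of
// `all`, at least one of `any` (an empty `any` means "don't care"), and none
// of `none`. The name-and-type values below are hypothetical.
//
//   set<AttrNameAndType> all, any, none;
//   all.insert(worldMatrixNameAndType);
//   none.insert(disabledTagNameAndType);
//   flatcache::set<BucketId> hits = pam.findBuckets(all, any, none);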
inline std::tuple<BucketId, ArrayIndex> PathToAttributesMap::getBucketAndArrayIndex(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (pathToBucketElem.find(path, &bucketAndElem))
{
const BucketId& bucketId = bucketAndElem->first;
const ArrayIndex& arrayIndex = bucketAndElem->second;
return { bucketId, arrayIndex };
}
else
{
// Commenting out the error in 104.2 as there is no hasPrim API in 104.2 yet
// and FabricSD needs to check for existence of a prim without causing an error to be logged
// CARB_LOG_ERROR_ONCE("getBucketAndArrayIndex called on non-existent path '%s'\n", Path(path).getText());
return { kInvalidBucketId, kInvalidArrayIndex };
}
}
inline Bucket PathToAttributesMap::getTypes(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (pathToBucketElem.find(path, &bucketAndElem))
{
const BucketImpl* bucketImpl = buckets.find(bucketAndElem->first);
if (bucketImpl)
{
return bucketImpl->GetBucket();
}
}
CARB_LOG_WARN_ONCE("getTypes called on non-existent path %s\n", Path(path).getText());
return Bucket();
}
inline size_t PathToAttributesMap::getAttributeCount(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (pathToBucketElem.find(path, &bucketAndElem))
{
const BucketImpl* bucketImpl = buckets.find(bucketAndElem->first);
if (bucketImpl)
{
return bucketImpl->GetBucket().size();
}
}
CARB_LOG_ERROR_ONCE("getAttributeCount called on non-existent path %s\n", Path(path).getText());
return 0;
}
inline TypeC PathToAttributesMap::getType(const PathC& path, const TokenC& attrName) const
{
APILOGGER("getType", apiLogEnabled, path, attrName);
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (!pathToBucketElem.find(path, &bucketAndElem))
{
CARB_LOG_WARN_ONCE("getTfType called on non-existent path %s\n", Path(path).getText());
return kUnknownType;
}
const BucketId &bucketId = bucketAndElem->first;
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
{
CARB_LOG_WARN_ONCE(
"getType called on path %s but its bucket no longer exists; pathToBucketElem is stale\n", Path(path).getText());
return kUnknownType;
}
const AttrName name{ attrName, NameSuffix::none };
const ScalarAttributeArray *scalarAttributeArray;
if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
return scalarAttributeArray->type;
}
else
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
return arrayAttributeArray->values.type;
}
}
CARB_LOG_WARN_ONCE("getType called on non-existent attribute %s %s\n", Path(path).getText(), Token(attrName).getText());
return kUnknownType;
}
// Return 1 if attribute is present at path, 0 otherwise
inline size_t PathToAttributesMap::count(const PathC& path, const TokenC& attrName) const
{
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
{
return 0;
}
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return 0;
const AttrName name{ attrName, NameSuffix::none };
const ScalarAttributeArray *scalarAttributeArray;
if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
return 1;
}
else
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
return 1;
}
}
return 0;
}
inline void PathToAttributesMap::moveElementScalarData(ScalarAttributeArray &destArray, const size_t destElemIndex, const ScalarAttributeArray &srcArray, const size_t srcElemIndex)
{
if (srcArray.type != destArray.type)
{
return;
}
const Typeinfo& typeinfo = srcArray.typeinfo;
const size_t size = typeinfo.size;
const bool isArray = typeinfo.isArray;
// Ideally usdValid would be driven by the change tracker,
// but we can't use that until we have a way to tie into it.
// Ideally there would be a subscriber for "copying back to USD"
// that could be parsed to see whether an element is invalid. For now
// we have to do this per-attribute, since that is as fine-grained as the
// data we have. It happens in moveElement to avoid repeating the
// attribute-array matching logic that the function already does.
if (!srcArray.usdValid) {
destArray.usdValid = srcArray.usdValid;
}
//
// In the case where this is the first element in the new bucket
// then the validity of the data needs to be moved from the old
// bucket.
//
if (destElemIndex == 0) {
destArray.cpuValid = srcArray.cpuValid;
destArray.gpuValid = srcArray.gpuValid;
}
if (destArray.cpuValid && !srcArray.cpuValid)
{
// This should not happen because of makeSrcValidIfDestValid
CARB_LOG_ERROR_ONCE("Invalid state while moving element: srcArray.cpuValid=%i destArray.cpuValid=%i",
srcArray.cpuValid, destArray.cpuValid);
assert(false);
}
if (destArray.gpuValid && !srcArray.gpuValid)
{
// This should not happen because of makeSrcValidIfDestValid
CARB_LOG_ERROR_ONCE("Invalid state while moving element: srcArray.gpuValid=%i destArray.gpuValid=%i",
srcArray.gpuValid, destArray.gpuValid);
assert(false);
}
//
// As noted above, the validity of the src was already matched to the destination's
// needs. If, for example, the src is valid where the dest doesn't need it, that is
// fine; we still move the data, because for array-of-array it at least avoids
// another malloc.
//
if (srcArray.cpuValid || isArray)
{
uint8_t* destArrayData = destArray.cpuData();
uint8_t* destPtr = destArrayData + destElemIndex * size;
const uint8_t* srcArrayData = srcArray.cpuData();
const uint8_t* srcPtr = srcArrayData + srcElemIndex * size;
memcpy(destPtr, srcPtr, size);
}
if (srcArray.gpuValid)
{
if (isArray)
{
if (srcArray.gpuCapacity)
{
destArray.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, srcArray.gpuCapacity, typeinfo.size);
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, destArray.gpuArray, srcArray.gpuArray, srcArray.gpuCapacity,
omni::gpucompute::MemcpyKind::deviceToDevice);
}
}
else
{
uint8_t* destArrayData = destArray.gpuArray;
uint8_t* destPtr = destArrayData + destElemIndex * size;
const uint8_t* srcArrayData = srcArray.gpuArray;
const uint8_t* srcPtr = srcArrayData + srcElemIndex * size;
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, destPtr, srcPtr, size, omni::gpucompute::MemcpyKind::deviceToDevice);
}
destArray.gpuAllocedWithCuda = true;
}
}
inline void PathToAttributesMap::moveElementArrayData(ArrayAttributeArray &destArray, const size_t destElemIndex, const ArrayAttributeArray &srcArray, const size_t srcElemIndex)
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
moveElementScalarData(destArray.values, destElemIndex, srcArray.values, srcElemIndex);
moveElementScalarData(destArray.elemCounts, destElemIndex, srcArray.elemCounts, srcElemIndex);
moveElementScalarData(destArray.cpuElemCounts, destElemIndex, srcArray.cpuElemCounts, srcElemIndex);
moveElementScalarData(destArray.gpuElemCounts, destElemIndex, srcArray.gpuElemCounts, srcElemIndex);
moveElementScalarData(destArray.gpuPtrs, destElemIndex, srcArray.gpuPtrs, srcElemIndex);
}
inline void PathToAttributesMap::moveElement(BucketImpl& destBucket,
size_t destElemIndex,
BucketImpl& srcBucket,
size_t srcElemIndex)
{
srcBucket.scalarAttributeArrays.forEach([this, &destBucket, &destElemIndex, &srcElemIndex](const AttrName& name, const ScalarAttributeArray& srcArray) {
// If the bucket move is due to an attribute removal, the destination
// bucket may lack this attribute, so check before copying
ScalarAttributeArray *destArray;
if (destBucket.scalarAttributeArrays.find(name, &destArray))
{
moveElementScalarData(*destArray, destElemIndex, srcArray, srcElemIndex);
}
});
srcBucket.arrayAttributeArrays.forEach([this, &destBucket, &destElemIndex, &srcElemIndex](const AttrName& name, const ArrayAttributeArray& srcArray) {
// If the bucket move is due to an attribute removal, the destination
// bucket may lack this attribute, so check before copying
ArrayAttributeArray *destArray;
if (destBucket.arrayAttributeArrays.find(name, &destArray))
{
moveElementArrayData(*destArray, destElemIndex, srcArray, srcElemIndex);
}
});
destBucket.elemToPath[destElemIndex] = std::move(srcBucket.elemToPath[srcElemIndex]);
}
inline void PathToAttributesMap::destroyElement(BucketId bucketId, size_t elemIndex, bool destroyDataPointedTo)
{
BucketImpl* srcBucketImplPtr = buckets.find(bucketId);
if (!srcBucketImplPtr)
return; // nothing to delete
BucketImpl& srcBucketImpl = *srcBucketImplPtr;
size_t elemCount = PathToAttributesMap::getElementCount(srcBucketImpl);
if (elemCount == 0)
return; // nothing to delete
if (destroyDataPointedTo)
{
// Destruct element about to be overwritten
srcBucketImpl.arrayAttributeArrays.forEach([this, &elemIndex](const AttrName& name, ArrayAttributeArray& array) {
// If a CPU array has been allocated, delete it
uint8_t** arrayCpuPtrArray = reinterpret_cast<uint8_t**>(array.values.cpuData());
uint8_t*& cpuPtrToDelete = arrayCpuPtrArray[elemIndex];
if (cpuPtrToDelete)
{
if (USE_PINNED_MEMORY && platform.gpuCuda)
{
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuPtrToDelete);
}
else
{
free(cpuPtrToDelete);
}
cpuPtrToDelete = nullptr;
}
// If a GPU array has been allocated, delete it
uint8_t** arrayGpuPtrArray = reinterpret_cast<uint8_t**>(array.gpuPtrs.cpuData());
uint8_t*& gpuPtrToDelete = arrayGpuPtrArray[elemIndex];
if (gpuPtrToDelete)
{
platform.gpuCuda->free(*platform.gpuCudaCtx, gpuPtrToDelete);
gpuPtrToDelete = nullptr;
}
size_t* arrayCpuCapacityArray = reinterpret_cast<size_t*>(array.cpuElemCounts.cpuData());
arrayCpuCapacityArray[elemIndex] = 0;
size_t* arrayGpuCapacityArray = reinterpret_cast<size_t*>(array.gpuElemCounts.cpuData());
arrayGpuCapacityArray[elemIndex] = 0;
size_t* arraySizeArray = reinterpret_cast<size_t*>(array.elemCounts.cpuData());
arraySizeArray[elemIndex] = 0;
});
}
// Copy last element to element to be deleted
PathC movedElemPath;
size_t lastElemIndex = elemCount - 1;
bool deletingLastElement = (elemIndex == lastElemIndex);
// If bucket has more than one element, move last element to deleted element
if (!deletingLastElement)
{
moveElement(srcBucketImpl, elemIndex, srcBucketImpl, lastElemIndex);
movedElemPath = asInt(srcBucketImpl.elemToPath[elemIndex]);
// For all attributes, dirty[elemIndex] := dirty[lastElemIndex]
srcBucketImpl.listenerIdToChanges.forEach([&elemIndex, &lastElemIndex, &elemCount](const ListenerId& listenerId, Changes& changes) {
size_t trackedAttrCount = changes.changedAttributes.size();
for (size_t i = 0; i != trackedAttrCount; i++)
{
ChangedIndicesImpl& changedIndices = changes.changedIndices[i];
if (changedIndices.contains(lastElemIndex) && !changedIndices.contains(elemIndex))
{
changedIndices.insert(elemIndex, elemCount - 1);
}
else if (!changedIndices.contains(lastElemIndex) && changedIndices.contains(elemIndex))
{
changedIndices.erase(elemIndex, elemCount - 1);
}
}
});
}
// Remove last element from change tracker
srcBucketImpl.listenerIdToChanges.forEach([&elemCount](const ListenerId& listenerId, Changes& changes) {
size_t trackedAttrCount = changes.changedAttributes.size();
for (size_t i = 0; i != trackedAttrCount; i++)
{
ChangedIndicesImpl& changedIndices = changes.changedIndices[i];
changedIndices.decrementN(elemCount - 1);
}
});
{
const auto removeLastElementFromMirroredArray = [this](MirroredArray& array) {
if (array.count > 0)
{
const size_t newSize = array.size() - array.typeinfo.size;
array.count--;
array.resize(newSize);
}
if (array.gpuCapacity != 0)
{
array.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, array.size(), array.typeinfo.size);
}
};
// Reduce element count
srcBucketImpl.scalarAttributeArrays.forEach([this, &removeLastElementFromMirroredArray](const AttrName& name, ScalarAttributeArray& array) {
removeLastElementFromMirroredArray(array);
});
srcBucketImpl.arrayAttributeArrays.forEach([this, &removeLastElementFromMirroredArray](const AttrName& name, ArrayAttributeArray& array) {
removeLastElementFromMirroredArray(array.values);
removeLastElementFromMirroredArray(array.elemCounts);
removeLastElementFromMirroredArray(array.cpuElemCounts);
removeLastElementFromMirroredArray(array.gpuElemCounts);
removeLastElementFromMirroredArray(array.gpuPtrs);
});
}
srcBucketImpl.elemToPath.pop_back();
// If bucket has more than one element, remap path that pointed to last element
if (!deletingLastElement)
{
std::pair<BucketId, ArrayIndex>* movedBucketAndElemIndex;
if (pathToBucketElem.find(movedElemPath, &movedBucketAndElemIndex))
{
movedBucketAndElemIndex->second = elemIndex;
}
else
{
CARB_LOG_ERROR_ONCE("destroyElement attempted to re-index missing path %s\n", Path(movedElemPath).getText());
}
}
// Update change trackers
// We allocate them lazily, so we have to iterate over listenerIdToChangeTrackerConfig
// then allocate bucketImpl.listenerIdToChanges if necessary
auto bucketImpl = buckets.find(bucketId);
if (bucketImpl)
{
listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &elemIndex, &lastElemIndex, &deletingLastElement](ListenerId& listenerId, ChangeTrackerConfig& config) {
if (config.changeTrackingEnabled)
{
// Allocate changes if necessary
Changes* changes;
if (bucketImpl->listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes();
}
//
// Since we may be moving within a bucket we need to inform the change tracker
//
if (!deletingLastElement)
{
if (changes->addedIndices.contains(lastElemIndex))
{
// only need to track that it moved if we already
// cared about it
changes->addNewPrim(elemIndex);
}
changes->removePrim(lastElemIndex);
}
else
{
changes->removePrim(elemIndex);
}
}
});
}
}
inline void PathToAttributesMap::moveElementBetweenBuckets(const PathC& path,
BucketId destBucketId,
BucketId srcBucketId,
const Bucket& destBucket)
{
if (destBucketId == srcBucketId)
return;
// Get source BucketImpl
BucketImpl* srcPtr = buckets.find(srcBucketId);
if (!srcPtr)
{
CARB_LOG_ERROR("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find source bucket\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
return;
}
// Get bucket and elem index
std::pair<BucketId, ArrayIndex>* srcBucketAndElemIndex;
if (!pathToBucketElem.find(path, &srcBucketAndElemIndex))
{
CARB_LOG_ERROR_ONCE("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find path\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
return;
}
// Get dest BucketImpl
BucketImpl* destPtr = buckets.find(destBucketId);
if (!destPtr)
{
CARB_LOG_ERROR("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find destination bucket\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
return;
}
size_t destElemIndex = PathToAttributesMap::getElementCount(*destPtr);
// Allocate element in new bucket
allocElementForMove(*destPtr, *srcPtr, path);
// Copy values from src to dest
//
// Ideally usdValid would be driven by the change tracker, but we can't use
// that until we have a way to tie into it. Ideally there would be a
// subscriber for "copying back to USD" that could be queried to see whether
// an element is invalid. At this point we just have to do it per-attribute,
// since that is as fine-grained as our data gets. This happens in
// moveElement to avoid repeating the attribute-array matching logic that
// function already performs.
moveElement(*destPtr, destElemIndex, *srcPtr, srcBucketAndElemIndex->second);
// Delete element in old bucket
// Don't destroy data pointed to, because we want dest element to point to it
const bool destroyDataPointedTo = false;
destroyElement(srcBucketId, srcBucketAndElemIndex->second, destroyDataPointedTo);
// Map path to new bucket
*srcBucketAndElemIndex = std::make_pair(destBucketId, destElemIndex);
// Convert destBucket to a set<AttrNameAndType>
set<AttrNameAndType> destBucket_v2;
destBucket_v2.v.resize(destBucket.size());
for (size_t i = 0; i != destBucket.size(); i++)
{
destBucket_v2.v[i] =
AttrNameAndType(Type(destBucket.v[i].type), Token(destBucket.v[i].name), destBucket.v[i].suffix);
}
// Copy dirty bits to new bucket
srcPtr->listenerIdToChanges.forEach([&destPtr, &destBucket_v2, &destElemIndex](const ListenerId &listener, const Changes& srcChanges) {
// Create if listenerId doesn't exist on dest bucket
Changes* destChanges;
if (destPtr->listenerIdToChanges.allocateEntry(listener, &destChanges))
{
new (destChanges) Changes();
}
size_t changedAttrCount = srcChanges.changedAttributes.size();
size_t destNewElemCount = destPtr->elemToPath.size();
for (size_t i = 0; i != changedAttrCount; i++)
{
const AttrNameAndType& nameAndType = srcChanges.changedAttributes.v[i];
// TODO: we could optimize this by taking advantage of destBucket_v2
// and changedAttributes being sorted. This would allow us to iterate
// through both at the same time, and avoid doing n O(log n) lookups.
if (destBucket_v2.contains(nameAndType))
{
destChanges->setDirty(nameAndType, destElemIndex, destNewElemCount);
}
}
});
}
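//
// Note on the flow above: moving an element between buckets is a four-step
// process: (1) allocate space in the destination bucket, (2) move the
// per-attribute values, (3) destroy the source element without freeing the
// array data it pointed to (the destination element now owns it), and
// (4) remap the path and carry per-listener dirty bits over for attributes
// that exist in both buckets.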
inline void PathToAttributesMap::addAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value)
{
addAttributeC(path, attrName, NameSuffix::none, type, value);
}
inline void PathToAttributesMap::addArrayAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount)
{
addArrayAttributeC(path, attrName, NameSuffix::none, type, value, arrayElemCount);
}
/**
 * @brief Adds multiple attributes to a primitive.
 *
 * @details As opposed to addAttributeC, this function allows the user to add multiple attributes to
 * a primitive at the same time, and only does re-bucketing once after adding all of them.
 * This should be faster than adding them one-by-one and re-bucketing after each.
 * @param path - primitive path
 * @param attrNames - vector of the attribute names as tokens
 * @param typeCs - vector of type identifiers, parallel to attrNames
 */
inline void PathToAttributesMap::addAttributesToPrim(const PathC& path,
const std::vector<TokenC>& attrNames,
const std::vector<TypeC>& typeCs)
{
CARB_ASSERT(attrNames.size() == typeCs.size());
addAttributesToBucket(path, attrNames, typeCs);
}
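// Illustrative usage of addAttributesToPrim (a minimal sketch; "theMap",
// "primPath" and the TypeC values are hypothetical and must come from the
// caller's registered types):
//
//     std::vector<TokenC> names = { asInt(pxr::TfToken("points")),
//                                   asInt(pxr::TfToken("velocities")) };
//     std::vector<TypeC> types = { pointsTypeC, velocitiesTypeC };
//     theMap.addAttributesToPrim(primPath, names, types);
//
// Compared with calling addAttributeC once per attribute, this re-buckets the
// prim at most once.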
// Find bucketId the path is currently in
// Find the bucket from the bucketId
inline void PathToAttributesMap::addAttributesToBucket(const PathC& path,
const std::vector<TokenC>& attrNames,
const std::vector<TypeC>& typeCs)
{
NameSuffix suffix = NameSuffix::none;
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
bool pathIsInFlatcache = (srcBucketId != kInvalidBucketId);
if (!pathIsInFlatcache)
{
addPath(path);
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
}
// Get dest bucket
// Dest bucket types = union(source bucket types, new type)
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
Bucket destBucket = bucketImplPtr->GetBucket();
for (uint32_t c = 0; c < attrNames.size(); ++c)
{
Token attrName(attrNames[c]);
AttrNameAndType nameAndType(Type(typeCs[c]), attrName, NameSuffix::none);
// Early out if attribute already present
if (destBucket.find(nameAndType) != destBucket.end())
continue;
// When adding a new attribute that shadows the name of an existing
// attribute, but with a new type, we choose to drop the old attribute
// on the floor.
// Unfortunately, since we are searching by name and do not know the old
// type, we have to scan the list of attributes.
for (const AttrNameAndType& bucketNameAndType : destBucket)
{
if (bucketNameAndType.name == attrNames[c] && bucketNameAndType.suffix == suffix &&
TypeC(bucketNameAndType.type) != typeCs[c])
{
// we can stop here since this enforces uniqueness of attribute names
// todo: check that USD already enforces this
destBucket.erase(bucketNameAndType);
break;
}
}
destBucket.insert(nameAndType);
}
BucketId destBucketId = addBucket(destBucket);
if (srcBucketId != destBucketId)
{
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
}
// Find bucketId the path is currently in
// Find the bucket from the bucketId
inline std::tuple<BucketId, ArrayIndex> PathToAttributesMap::addAttributeGetBucketAndArrayIndex(
const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type)
{
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
bool pathIsInFlatcache = (srcBucketId != kInvalidBucketId);
if (!pathIsInFlatcache)
{
addPath(path);
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
}
// Get dest bucket
// Dest bucket types = union(source bucket types, new type)
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
Bucket destBucket = bucketImplPtr->GetBucket();
Token attrName(attrNameC);
AttrNameAndType nameAndType(Type(type), attrName, nameSuffix);
// Early out if attribute already present
if (destBucket.find(nameAndType) != destBucket.end())
return { srcBucketId, srcElemIndex };
// When adding a new attribute that shadows the name of an existing
// attribute, but with a new type, we choose to drop the old attribute
// on the floor.
// Unfortunately, since we are searching by name and do not know the old
// type, we have to scan the list of attributes.
for (const AttrNameAndType& bucketNameAndType : destBucket)
{
if (bucketNameAndType.name == attrNameC && bucketNameAndType.suffix == nameSuffix &&
TypeC(bucketNameAndType.type) != type)
{
// we can stop here since this enforces uniqueness of attribute names
// todo: check that USD already enforces this
destBucket.erase(bucketNameAndType);
break;
}
}
destBucket.insert(nameAndType);
BucketId destBucketId = addBucket(destBucket);
size_t destElemIndex;
if (srcBucketId != destBucketId)
{
destElemIndex = getElementCount(destBucket);
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
else
{
destElemIndex = srcElemIndex;
}
return { destBucketId, destElemIndex };
}
inline void PathToAttributesMap::addAttributeC(
const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type, const void* value)
{
const Typeinfo& typeinfo = getTypeInfo(type);
if (typeinfo.isArray && value)
{
CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
return;
}
addAttributeInternal(path, attrNameC, nameSuffix, type, value, typeinfo, 0);
}
inline void PathToAttributesMap::addArrayAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const void* value, const size_t arrayElemCount)
{
const Typeinfo& typeinfo = getTypeInfo(type);
addAttributeInternal(path, attrName, suffix, type, value, typeinfo, arrayElemCount);
}
inline SpanC PathToAttributesMap::getOrCreateAttributeWrC(const PathC& path,
const TokenC& attrName,
TypeC type)
{
APILOGGER("getOrCreateAttributeWrC", apiLogEnabled, path, attrName);
BucketId bucketId;
ArrayIndex elemIndex;
std::tie(bucketId, elemIndex) = addAttributeGetBucketAndArrayIndex(path, attrName, NameSuffix::none, type);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), NameSuffix::none);
setArrayElementDirty(arrayAndchangedIndices, elemIndex);
SpanC array = arrayAndchangedIndices.array;
return getArrayElementPtr(array, elemIndex);
}
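// Illustrative usage of getOrCreateAttributeWrC (a minimal sketch; "theMap",
// "primPath", "radiusToken" and "floatTypeC" are hypothetical):
//
//     // Ensures the attribute exists (re-bucketing if needed), marks this
//     // element dirty, and returns a writable span over just this element.
//     SpanC elem = theMap.getOrCreateAttributeWrC(primPath, radiusToken, floatTypeC);
//     *reinterpret_cast<float*>(elem.ptr) = 1.0f;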
template <typename T>
void PathToAttributesMap::addAttribute(
const PathC& path, const TokenC& attrName, TypeC type, const T& value)
{
APILOGGER("addAttribute", apiLogEnabled, path, attrName);
// TODO: check that type is compatible
return addAttributeC(path, attrName, type, &value);
}
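// Illustrative usage of the typed wrapper (a sketch; as the TODO above notes,
// nothing yet verifies that T matches the registered Typeinfo for 'type', so
// that is the caller's responsibility):
//
//     float defaultRadius = 0.5f;
//     theMap.addAttribute(primPath, radiusToken, floatTypeC, defaultRadius);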
template <typename T>
void PathToAttributesMap::addSubAttribute(
const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const T& value)
{
APILOGGER("addSubAttribute", apiLogEnabled, path, attrName);
// TODO: check that type is compatible
return addAttributeC(path, attrName, suffix, type, &value);
}
// Return a new bucket with all sub-attributes of a single attribute removed
inline Bucket removeAllSubAttributesFromBucket(const Bucket& bucket, const TokenC& attrName)
{
Bucket newBucket;
// TODO: implement set::delete
for (auto nameAndType : bucket)
{
// Don't compare suffix and type to delete all suffix variants of name
if (nameAndType.name != attrName)
{
newBucket.insert(nameAndType);
}
}
return newBucket;
}
// Return a new bucket with all sub-attributes of all named attributes removed
inline Bucket removeAllSubAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames)
{
Bucket newBucket;
for (auto nameAndType : bucket)
{
if (std::find_if(attrNames.begin(), attrNames.end(),
[&](auto attrName) { return nameAndType.name == attrName; }) == attrNames.end())
{
newBucket.insert(nameAndType);
}
}
return newBucket;
}
inline Bucket removeSubAttributeFromBucket(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
Bucket newBucket;
// TODO: implement set::delete
for (auto nameAndType : bucket)
{
// Don't compare suffix and type to delete all suffix variants of name
if (!(nameAndType.name == attrName && nameAndType.suffix == suffix))
{
newBucket.insert(nameAndType);
}
}
return newBucket;
}
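// Illustrative behavior of the helpers above (a sketch, not a test):
// given a bucket { (points, none), (points, connection), (radius, none) },
// - removeAllSubAttributesFromBucket(bucket, points) drops every suffix
//   variant of "points", yielding { (radius, none) };
// - removeSubAttributeFromBucket(bucket, points, suffix) drops only the
//   (points, suffix) variant, e.g. yielding { (points, none), (radius, none) }
//   when suffix is the connection suffix.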
// Remove an attribute and all its subattributes (suffixes).
inline void PathToAttributesMap::removeAttribute(const PathC& path, const TokenC& attrName)
{
APILOGGER("removeAttribute", apiLogEnabled, path, attrName);
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
if (srcBucketId == kInvalidBucketId)
return;
// srcBucketId != kInvalidBucketId guarantees find will succeed
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
const Bucket& srcTypes = bucketImplPtr->GetBucket();
const Bucket destBucket = removeAllSubAttributesFromBucket(srcTypes, attrName);
const BucketId destBucketId = addBucket(destBucket);
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
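// Illustrative usage (a sketch; names are hypothetical):
//
//     theMap.removeAttribute(primPath, radiusToken);
//
// This moves the prim to the bucket whose attribute set lacks "radius" and
// all of its suffix variants (creating that bucket if necessary); other prims
// in the source bucket are unaffected.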
inline void PathToAttributesMap::removeAttributesFromPath(const PathC& path, const std::vector<TokenC>& attrNames)
{
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
if (srcBucketId == kInvalidBucketId)
return;
// srcBucketId != kInvalidBucketId guarantees find will succeed
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
const Bucket& bucket = bucketImplPtr->GetBucket();
const Bucket destBucket = removeAllSubAttributesFromBucket(bucket, attrNames);
const BucketId destBucketId = addBucket(destBucket);
if (srcBucketId != destBucketId)
{
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
}
inline void PathToAttributesMap::removeAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames)
{
// first we need to find the actual bucketImpl that we will be
// deleting attributes from
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
return;
BucketId bucketId = iter->second;
BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return;
// Buckets are found based on a set of the attributes, so we build
// the new set based on the attribute names
Bucket newBucket = removeAllSubAttributesFromBucket(bucket, attrNames);
std::pair<BucketId, BucketImpl&> newBucketIdAndImpl = findOrCreateBucket(newBucket);
BucketImpl& newBucketImpl = newBucketIdAndImpl.second;
const size_t origSize = newBucketImpl.elemToPath.size();
if (origSize == 0)
{
// In the case where the destination bucket is brand new, we copy the whole bucket and just delete the arrays that are no longer needed.
newBucketImpl = *bucketImplPtr;
newBucketImpl.SetBucket(std::move(newBucket));
// loop finding and deleting attribute arrays
for (auto attrName : attrNames)
{
newBucketImpl.scalarAttributeArrays.forEach([this, &attrName, &newBucketImpl](const AttrName& name, ScalarAttributeArray& array) {
if (name.name == attrName)
{
newBucketImpl.scalarAttributeArrays.freeEntry(name);
}
});
newBucketImpl.arrayAttributeArrays.forEach([this, &attrName, &newBucketImpl](const AttrName& name, ArrayAttributeArray& array) {
if (name.name == attrName)
{
newBucketImpl.arrayAttributeArrays.freeEntry(name);
}
});
}
}
else
{
// TODO: there should be a faster way to do this, but there is more to discuss here later
for (const auto path : bucketImplPtr->elemToPath)
{
moveElementBetweenBuckets(
asInt(path), newBucketIdAndImpl.first, bucketId, newBucket);
}
}
//
// need to update the pathToBucketElem for all the items that just "moved" buckets
// TODO: make this work with moving buckets
for (size_t i = origSize; i < newBucketImpl.elemToPath.size(); ++i)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
pathToBucketElem.allocateEntry(asInt(newBucketImpl.elemToPath[i]), &bucketAndElemIndex);
*bucketAndElemIndex = std::make_pair(newBucketIdAndImpl.first, i);
}
buckets.erase(bucketId);
attrNameSetToBucketId.erase(bucket);
}
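// Note on the bulk path above: when the filtered attribute set maps to a
// brand-new (empty) bucket, the whole BucketImpl is copied and the dropped
// arrays are freed, avoiding per-element moves; otherwise every element is
// moved into the existing destination bucket one at a time.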
// Remove a particular (name,suffix) pair, for example the connection of an attribute
inline void PathToAttributesMap::removeSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("removeSubAttribute", apiLogEnabled, path, attrName);
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
if (srcBucketId == kInvalidBucketId)
{
CARB_LOG_ERROR_ONCE("removeSubAttribute called on non-existent path %s\n", Path(path).getText());
return;
}
// srcBucketId != kInvalidBucketId guarantees find will succeed
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
const Bucket& srcTypes = bucketImplPtr->GetBucket();
const Bucket destBucket = removeSubAttributeFromBucket(srcTypes, attrName, suffix);
const BucketId destBucketId = addBucket(destBucket);
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
// Removes an attribute (and all its subattributes) for all paths in a bucket
inline void PathToAttributesMap::removeAttributeC(const Bucket& bucket,
const TokenC& attrName,
TypeC type)
{
APILOGGER("removeAttributeC", apiLogEnabled, attrName);
// first we need to find the actual bucketImpl that we will be
// deleting attributes from
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
return;
BucketId bucketId = iter->second;
BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return;
// Buckets are found based on a set of the attributes, so we build
// the new set based on the attribute names
Bucket newBucket = removeAllSubAttributesFromBucket(bucket, attrName);
BucketId newBucketId = findBucketId(newBucket);
if (newBucketId == kInvalidBucketId)
{
// In the case where the filtered set forms a new bucket, we keep the prims where they are and just delete the arrays that are no longer needed.
// This means that nothing else needs to be updated, as all the prims stay in the same place
bucketImplPtr->SetBucket(std::move(newBucket));
if (getTypeInfo(type).isArray)
{
bucketImplPtr->arrayAttributeArrays.forEach([this, &attrName, bucketImplPtr](const AttrName& name, ArrayAttributeArray& array) {
if (name.name == attrName)
{
bucketImplPtr->arrayAttributeArrays.freeEntry(name);
}
});
}
else
{
bucketImplPtr->scalarAttributeArrays.forEach([this, &attrName, bucketImplPtr](const AttrName& name, ScalarAttributeArray& array) {
if (name.name == attrName)
{
bucketImplPtr->scalarAttributeArrays.freeEntry(name);
}
});
}
attrNameSetToBucketId[newBucket] = bucketId;
attrNameSetToBucketId.erase(bucket);
}
else
{
std::pair<BucketId, BucketImpl&> newBucketIdAndImpl = findOrCreateBucket(newBucket);
BucketImpl& newBucketImpl = newBucketIdAndImpl.second;
const size_t origSize = newBucketImpl.elemToPath.size();
// TODO: there should be a faster way to do this, but there is more to
// discuss here later. Currently this pulls from the front, which ensures
// that elements stay in the same "order" when moving buckets, but it means
// shuffling entries around in elemToPath. Doing this in bulk would make all
// of this better.
while(bucketImplPtr->elemToPath.size())
{
moveElementBetweenBuckets(
asInt(bucketImplPtr->elemToPath.front()), newBucketIdAndImpl.first, bucketId, newBucket);
}
//
// need to update the pathToBucketElem for all the items that just "moved" buckets
// TODO: make this work with moving buckets
for (size_t i = origSize; i < newBucketImpl.elemToPath.size(); ++i)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
pathToBucketElem.allocateEntry(asInt(newBucketImpl.elemToPath[i]), &bucketAndElemIndex);
*bucketAndElemIndex = std::make_pair(newBucketIdAndImpl.first, i);
}
}
}
inline void PathToAttributesMap::removePath(const PathC& path)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
if (!pathToBucketElem.find(path, &bucketAndElemIndex))
{
CARB_LOG_ERROR_ONCE("removePath called on non-existent path %s \n", Path(path).getText());
return;
}
const BucketId &bucketId = bucketAndElemIndex->first;
const size_t &elemIndex = bucketAndElemIndex->second;
const bool destroyDataPointedTo = true;
destroyElement(bucketId, elemIndex, destroyDataPointedTo);
pathToBucketElem.freeEntry(path);
}
inline size_t PathToAttributesMap::count(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
if (pathToBucketElem.find(path, &bucketAndElemIndex))
{
return 1;
}
else
{
return 0;
}
}
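// Illustrative usage (a sketch): count() is an existence test with
// std::map::count-style semantics, e.g.:
//
//     if (theMap.count(primPath) == 1)
//     {
//         // primPath is present in the map
//     }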
struct ViewIterator
{
size_t bucketIndex = 0;
size_t elementIndex = 0;
std::vector<size_t>::const_iterator bucketElemCount;
ViewIterator& operator++()
{
elementIndex++;
if (elementIndex == *bucketElemCount)
{
bucketIndex++;
bucketElemCount++;
elementIndex = 0;
}
return *this;
}
bool operator!=(const ViewIterator& rhs) const
{
return bucketIndex != rhs.bucketIndex || elementIndex != rhs.elementIndex;
}
ViewIterator& operator*()
{
return *this;
}
};
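// Illustrative traversal with ViewIterator (a sketch; assumes the caller has
// a vector of per-bucket element counts with no empty buckets, since
// operator++ only advances to the next bucket after consuming a full count):
//
//     std::vector<size_t> counts = /* element count per bucket */;
//     ViewIterator it;
//     it.bucketElemCount = counts.begin();
//     ViewIterator end;
//     end.bucketIndex = counts.size();
//     end.bucketElemCount = counts.end();
//     for (; it != end; ++it)
//         visit(it.bucketIndex, it.elementIndex);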
// Array resize that does not preserve previous data
inline void PathToAttributesMap::destructiveResizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx)
{
uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuPointerArray.cpuData());
uint8_t*& gpuData = elemToArrayGpuData[elem];
// Reallocate if the capacity doesn't match the desired element count, or if nothing has been allocated yet
if (capacity != desiredElemCount || gpuData == nullptr)
{
size_t byteCount = desiredElemCount * elemByteCount;
if (gpuData)
{
platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuData);
}
gpuData = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemByteCount));
// We've written to gpuData on the CPU so we have to invalidate any GPU mirror of it
gpuPointerArray.gpuValid = false;
gpuPointerArray.cpuValid = true;
capacity = desiredElemCount;
}
}
// Array resize that preserves previous data
inline void PathToAttributesMap::resizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx)
{
// TODO: reduce the number of reallocations by allocating capacity larger than size
// and not always reallocating when desiredElemCount < capacity.
// If gpuCapacity is 0, the array was recently copied and needs to be reallocated.
if(computeAPI && (capacity != desiredElemCount || gpuPointerArray.gpuCapacity == 0))
{
size_t oldByteCount = capacity * elemByteCount;
size_t newByteCount = desiredElemCount * elemByteCount;
uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuPointerArray.cpuData());
uint8_t*& gpuData = elemToArrayGpuData[elem];
uint8_t* newGpuData = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, newByteCount, elemByteCount));
if (gpuData)
{
using omni::gpucompute::MemcpyKind;
size_t copyByteCount = std::min(oldByteCount, newByteCount);
computeAPI->memcpy(*computeCtx, newGpuData, gpuData, copyByteCount, MemcpyKind::deviceToDevice);
// Note that this free has to be async even though the previous
// memcpy is sync. The reason is that deviceToDevice memcpys
// are always async, even if you call the sync version of cudaMemcpy.
// So if you do "sync" memcpy and then sync free, the free can
// execute on the CPU before the copy executes on the GPU.
// See https://nvidia-omniverse.atlassian.net/browse/OM-46051
computeAPI->freeAsync(*computeCtx, gpuData);
}
gpuData = newGpuData;
// We've written to gpuData on the CPU so we have to invalidate any GPU mirror of it
gpuPointerArray.gpuValid = false;
gpuPointerArray.cpuValid = true;
capacity = desiredElemCount;
}
}
// This function is called when we are just about to do a transfer to GPU, to
// make sure that GPU array is large enough.
// It is called only when !gpuValid, so we don't have to preserve any existing
// GPU array.
//
// Algorithm:
// If capacity is sufficient, do nothing
// If not, free any existing allocation, then allocate
inline void PathToAttributesMap::allocGpuMemIfNecessary(PathToAttributesMap::MirroredArray& array,
size_t byteCount,
size_t elemSize,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx)
{
bool capacitySufficient = (byteCount <= array.gpuCapacity);
if (!capacitySufficient)
{
if (array.gpuArray)
{
computeAPI->freeAsync(*computeCtx, array.gpuArray);
}
array.gpuArray = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemSize));
array.gpuCapacity = byteCount;
}
}
inline PrimBucketListImpl PathToAttributesMap::getChanges(ListenerId listenerId)
{
PrimBucketListImpl changesOut;
// For now, iterate over all buckets
// We'll probably want the user to specify a subset of buckets for change logging
changesOut.buckets.reserve(buckets.end());
changesOut.changes.reserve(buckets.end());
BucketId id{ 0 };
for (unsigned int i = 0; i < buckets.end(); ++i, ++id)
{
BucketImpl* bucketPtr = buckets.find(id);
if (!bucketPtr)
continue;
BucketImpl& bucketImpl = *bucketPtr;
BucketId bucketId = id;
Changes* changesIn;
if (!bucketImpl.listenerIdToChanges.find(listenerId, &changesIn))
{
continue;
}
size_t changedAttrCount = changesIn->changedAttributes.size();
size_t primCount = bucketImpl.elemToPath.size();
bool attributesChanged = (changedAttrCount != 0 && primCount != 0);
bool primsAdded = (changesIn->getNewPrimCount() != 0);
if (attributesChanged || primsAdded)
{
changesOut.buckets.v.push_back(bucketId);
changesOut.changes.push_back(BucketChangesImpl());
BucketChangesImpl& bucketChanges = changesOut.changes.back();
// Write changed attributes
bucketChanges.changedAttributes = changesIn->changedAttributes;
bucketChanges.changedIndices.resize(changedAttrCount);
for (size_t j = 0; j != changedAttrCount; j++)
{
bucketChanges.changedIndices[j] = { changesIn->changedIndices[j].allIndicesChanged,
{ changesIn->changedIndices[j].changedIndices.data(),
changesIn->changedIndices[j].changedIndices.size() } };
}
bucketChanges.pathArray = { reinterpret_cast<const Path*>(bucketImpl.elemToPath.data()),
bucketImpl.elemToPath.size() };
// Write added prims
bucketChanges.addedIndices = { changesIn->addedIndices.data(), changesIn->addedIndices.size() };
}
}
return changesOut;
}
inline void PathToAttributesMap::popChanges(ListenerId listenerId)
{
BucketId id{ 0 };
for (unsigned int i = 0; i < buckets.end(); ++i, ++id)
{
BucketImpl* bucketPtr = buckets.find(id);
if (bucketPtr)
{
// Create listenerId if it doesn't exist
Changes* changes;
if (bucketPtr->listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes;
}
changes->changedAttributes.clear();
changes->changedIndices.clear();
changes->addedIndices.clear();
}
}
}
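// Illustrative change-tracking cycle (a minimal sketch; the listener is
// assumed to have been registered and change tracking enabled elsewhere):
//
//     PrimBucketListImpl changed = theMap.getChanges(listenerId);
//     // ... walk changed.buckets / changed.changes, reacting to dirty
//     // attributes and added prims ...
//     theMap.popChanges(listenerId); // clear the logs for the next cycle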
inline PathToAttributesMap::MirroredArray::MirroredArray(Platform& platform_, const TypeC &type, const Typeinfo& typeinfo) noexcept
: cpuArray()
, platform(platform_)
, type(type)
, typeinfo(typeinfo)
, gpuArray(nullptr)
, gpuCapacity(0)
, d3dArrays()
, count(0)
, usdValid(true)
, cpuValid(false)
, gpuValid(false)
, gpuAllocedWithCuda(false)
, attributeMutex()
{
}
inline PathToAttributesMap::MirroredArray::~MirroredArray()
{
// clean up any non-array gpu data
if (gpuArray)
{
if (gpuAllocedWithCuda)
{
platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuArray);
}
else
{
// @TODO Fix crash during D3dVk free! N.B. that this backend is incomplete and not in active use
// gpuD3dVk->freeAsync(*gpuD3dVkCtx, gpuArray);
}
gpuArray = nullptr;
}
gpuValid = false;
}
inline PathToAttributesMap::MirroredArray& PathToAttributesMap::MirroredArray::operator=(const MirroredArray& other) noexcept
{
if (!other.isArrayOfArray())
{
cpuArray = other.cpuArray;
}
else
{
// Here we set all pointers in dest array to nullptr
// The allocation and data copy happens in PathToAttributesMap's operator=
cpuArray.resize(other.size());
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(cpuData());
for (size_t elemIndex = 0; elemIndex != other.count; elemIndex++)
{
destPtrs[elemIndex] = nullptr;
}
}
// platform = other.platform; // intentionally not copy-assigning platform
type = other.type;
typeinfo = other.typeinfo;
usdValid = other.usdValid;
cpuValid = other.cpuValid;
count = other.count;
// GPU data needs to be copied explicitly using the gpu compute API
gpuArray = nullptr;
gpuCapacity = 0;
gpuValid = false;
gpuAllocedWithCuda = false;
if (other.gpuValid)
{
// Also need to empty the cpuArray as it is a cpu pointer to now-invalid GPU data
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(cpuData());
for (size_t elemIndex = 0; elemIndex != other.count; elemIndex++)
{
destPtrs[elemIndex] = nullptr;
}
}
return *this;
}
inline PathToAttributesMap::MirroredArray::MirroredArray(MirroredArray&& other) noexcept
: cpuArray(std::move(other.cpuArray)),
platform(other.platform),
type(other.type),
typeinfo(other.typeinfo),
gpuArray(other.gpuArray),
gpuCapacity(other.gpuCapacity),
d3dArrays(std::move(other.d3dArrays)),
count(other.count),
usdValid(other.usdValid),
cpuValid(other.cpuValid),
gpuValid(other.gpuValid),
gpuAllocedWithCuda(other.gpuAllocedWithCuda),
attributeMutex() // intentionally not move constructing the mutex
{
other.gpuArray = nullptr;
}
inline PathToAttributesMap::MirroredArray& PathToAttributesMap::MirroredArray::operator=(MirroredArray&& other) noexcept
{
MirroredArray tmp(std::move(other));
swap(*this, tmp);
return *this;
}
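// Design note: move assignment above uses the move-and-swap idiom. The
// temporary takes this object's previous state, and its destructor releases
// any GPU allocation we held, while the moved-from 'other' is left in the
// state established by the move constructor. attributeMutex is deliberately
// excluded from swap() below because a mutex is neither movable nor swappable.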
inline void swap(PathToAttributesMap::MirroredArray& a, PathToAttributesMap::MirroredArray& b) noexcept
{
using std::swap;
swap(a.cpuArray, b.cpuArray);
swap(a.type, b.type);
swap(a.typeinfo, b.typeinfo);
swap(a.gpuArray, b.gpuArray);
swap(a.gpuCapacity, b.gpuCapacity);
swap(a.d3dArrays, b.d3dArrays);
swap(a.count, b.count);
swap(a.usdValid, b.usdValid);
swap(a.cpuValid, b.cpuValid);
swap(a.gpuValid, b.gpuValid);
swap(a.gpuAllocedWithCuda, b.gpuAllocedWithCuda);
// swap(a.attributeMutex, b.attributeMutex); // intentionally NOT swapping attribute mutex because it is not move-constructible
}
inline PathToAttributesMap::ArrayAttributeArray::ArrayAttributeArray(Platform& platform_, const TypeC& type, const Typeinfo& typeinfo) noexcept
: values(platform_, type, typeinfo)
, elemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{sizeof(PTAM_SIZE_TYPE), false, 0})
, cpuElemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 })
, gpuElemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 })
, gpuPtrs(platform_, PTAM_POINTER_TYPEC, Typeinfo{ sizeof(PTAM_POINTER_TYPE), false, 0 })
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
}
inline PathToAttributesMap::ArrayAttributeArray::~ArrayAttributeArray()
{
Platform& platform = values.platform;
CARB_ASSERT(&platform == &elemCounts.platform);
CARB_ASSERT(&platform == &cpuElemCounts.platform);
CARB_ASSERT(&platform == &gpuElemCounts.platform);
CARB_ASSERT(&platform == &gpuPtrs.platform);
if (values.count)
{
uint8_t** elemToCpuPtr = reinterpret_cast<uint8_t**>(values.cpuData());
for (size_t elemIndex = 0; elemIndex != values.count; elemIndex++)
{
// If a CPU array has been allocated, free it
uint8_t*& cpuPtrToDelete = elemToCpuPtr[elemIndex];
if (cpuPtrToDelete)
{
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
free(cpuPtrToDelete);
}
else if (platform.gpuCuda)
{
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuPtrToDelete);
}
cpuPtrToDelete = nullptr;
values.cpuValid = false;
}
}
}
if (gpuPtrs.count)
{
// CPU array of GPU pointers
uint8_t** elemToGpuPtr = reinterpret_cast<uint8_t**>(gpuPtrs.cpuData());
for (size_t elemIndex = 0; elemIndex != gpuPtrs.count; elemIndex++)
{
// If a GPU array has been allocated, free it
uint8_t*& gpuPtrToDelete = elemToGpuPtr[elemIndex];
if (gpuPtrToDelete)
{
CARB_ASSERT(platform.gpuCuda);
CARB_ASSERT(platform.gpuCudaCtx);
platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuPtrToDelete);
gpuPtrToDelete = nullptr;
}
}
}
}
inline PathToAttributesMap::ArrayAttributeArray& PathToAttributesMap::ArrayAttributeArray::operator=(const ArrayAttributeArray& other) noexcept
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
values = other.values;
elemCounts = other.elemCounts;
cpuElemCounts = other.cpuElemCounts;
gpuElemCounts = other.gpuElemCounts;
gpuPtrs = other.gpuPtrs;
return *this;
}
inline PathToAttributesMap::ArrayAttributeArray::ArrayAttributeArray(ArrayAttributeArray&& other) noexcept
: values(std::move(other.values))
, elemCounts(std::move(other.elemCounts))
, cpuElemCounts(std::move(other.cpuElemCounts))
, gpuElemCounts(std::move(other.gpuElemCounts))
, gpuPtrs(std::move(other.gpuPtrs))
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
other.values.count = 0;
other.elemCounts.count = 0;
other.cpuElemCounts.count = 0;
other.gpuElemCounts.count = 0;
other.gpuPtrs.count = 0;
}
inline PathToAttributesMap::ArrayAttributeArray& PathToAttributesMap::ArrayAttributeArray::operator=(ArrayAttributeArray&& other) noexcept
{
ArrayAttributeArray tmp(std::move(other));
swap(*this, tmp);
return *this;
}
inline void swap(PathToAttributesMap::ArrayAttributeArray& a, PathToAttributesMap::ArrayAttributeArray& b) noexcept
{
using std::swap;
swap(a.values, b.values);
swap(a.elemCounts, b.elemCounts);
swap(a.cpuElemCounts, b.cpuElemCounts);
swap(a.gpuElemCounts, b.gpuElemCounts);
swap(a.gpuPtrs, b.gpuPtrs);
}
inline void PathToAttributesMap::printBucket(const Bucket& bucket) const
{
bool multiLine = (1 < bucket.size());
printf("{");
if (multiLine)
printf("\n");
for (const auto& b : bucket)
{
TokenC attrName = b.name;
NameSuffix suffix = b.suffix;
Type type(b.type);
if (multiLine)
printf(" ");
Token attrNameToken(attrName);
std::cout << "TypeC(" << type << ") " << attrNameToken.getText() << suffix;
if (multiLine)
printf("\n");
}
printf("} ");
// Print arrays in bucket
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
return;
BucketId bucketId = iter->second;
auto bucketImplPtr = buckets.find(bucketId);
if (bucketImplPtr)
{
const BucketImpl& bucketImpl = *bucketImplPtr;
bucketImpl.scalarAttributeArrays.forEach([&multiLine](const AttrName& name, const ScalarAttributeArray& array) {
#if ENABLE_USD_DEBUGGING
std::cout << toTfToken(name.name).GetText() << " " << toString(name.suffix) << " ";
#else
std::cout << name.name.token << " ";
#endif
std::cout << array.size() << "bytes ";
if (multiLine)
std::cout << "\n";
});
bucketImpl.arrayAttributeArrays.forEach([&multiLine](const AttrName& name, const ArrayAttributeArray& array) {
#if ENABLE_USD_DEBUGGING
std::cout << toTfToken(name.name).GetText() << " " << toString(name.suffix) << " ";
#else
std::cout << name.name.token << " ";
#endif
std::cout << array.values.size() << "bytes ";
if (multiLine)
std::cout << "\n";
});
}
}
inline void PathToAttributesMap::printBucketName(const Bucket& bucketTypes, BucketId bucketId) const
{
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return;
const BucketImpl& bucketImpl = *bucketImplPtr;
std::cout << "Id: " << size_t(bucketId) << " ";
size_t bucketPrimCount = bucketImpl.elemToPath.size();
std::cout << "PrimCount: " << bucketPrimCount << " ";
// Find USD prim type
for (auto attrNameAndType : bucketTypes)
{
Type type(attrNameAndType.type);
if (type.role == AttributeRole::ePrimTypeName)
{
Token nameToken(attrNameAndType.name);
std::cout << "PrimType: " << nameToken.getText() << " ";
}
}
std::cout << "AttributeNames: ";
for (auto attrNameAndType : bucketTypes)
{
Token nameToken(attrNameAndType.name);
std::cout << nameToken.getText() << toString(attrNameAndType.suffix) << " ";
}
std::cout << "\n";
}
inline void PathToAttributesMap::printBucketNames() const
{
std::cout << "Buckets:\n";
for (auto& bucketIdAndBucket : attrNameSetToBucketId)
{
const Bucket& bucketTypes = bucketIdAndBucket.first;
BucketId bucketId = bucketIdAndBucket.second;
std::cout << " ";
printBucketName(bucketTypes, bucketId);
}
}
inline void PathToAttributesMap::printBucketNamesAndTypes() const
{
std::cout << "Buckets:\n";
for (auto& bucketIdAndBucket : attrNameSetToBucketId)
{
std::cout << " ";
const Bucket& bucketTypes = bucketIdAndBucket.first;
for (AttrNameAndType attrNameAndType : bucketTypes)
{
Type type(attrNameAndType.type);
Token nameToken(attrNameAndType.name);
std::cout << "(" << type << " " << nameToken.getText() << " " << attrNameAndType.suffix << " "
<< "TypeC(" << attrNameAndType.type << ") ";
}
std::cout << "\n";
}
}
inline void PathToAttributesMap::bucketImplCopyScalarAttributeArray(ScalarAttributeArray &dest, const ScalarAttributeArray &src)
{
CARB_ASSERT(dest.type == src.type);
if (src.gpuValid)
{
dest.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, src.size(), dest.typeinfo.size);
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, dest.gpuArray, src.gpuArray, src.gpuCapacity, omni::gpucompute::MemcpyKind::deviceToDevice);
dest.gpuValid = true;
dest.gpuCapacity = src.gpuCapacity;
dest.gpuAllocedWithCuda = src.gpuAllocedWithCuda;
}
}
inline void PathToAttributesMap::bucketImplCopyArrayAttributeArray(BucketImpl& destBucketImpl, const AttrName& destName, ArrayAttributeArray &dest, const ArrayAttributeArray &src)
{
CARB_ASSERT(dest.values.type == src.values.type);
const Typeinfo &typeInfo = dest.values.typeinfo;
const size_t arrayElemSize = typeInfo.arrayElemSize;
MirroredArray *const destSizeArray = &dest.elemCounts;
MirroredArray *const destCpuCapacityArray = &dest.cpuElemCounts;
const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(dest);
// TODO: Figure out how to remove this fixup step in a cleaner way
// Need to set capacity to zero, because capacity will
// have been erroneously copied from source in
// MirroredArray copy constructor
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(*destCpuCapacityArray, destName, destAOA, destBucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndchangedIndices);
SpanC destCapacitySpan = arrayAndchangedIndices.array;
size_t* destCapacities = reinterpret_cast<size_t*>(destCapacitySpan.ptr);
for (size_t elemIndex = 0; elemIndex != destCapacitySpan.elementCount; elemIndex++)
{
destCapacities[elemIndex] = 0;
}
// getArrayWrC to allocate data for arrays
ArrayAndDirtyIndices destSpan = getArraySpanC(dest.values, destName, destAOA, destBucketImpl, CpuWriteConfig());
setArrayDirty(destSpan);
const size_t* elemCounts = reinterpret_cast<const size_t*>(destSizeArray->cpuData());
if (src.values.cpuValid)
{
// TODO: Isn't this redundant with the call to getArraySpanC above with cpu write access?
enableCpuWrite(dest.values, elemCounts, destCpuCapacityArray, nullptr, nullptr);
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destSpan.array.ptr);
uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(src.values.cpuData());
for (size_t elemIndex = 0; elemIndex != destSpan.array.elementCount; elemIndex++)
{
uint8_t* destPtr = destPtrs[elemIndex];
const uint8_t* srcPtr = srcPtrs[elemIndex];
size_t sizeBytes = elemCounts[elemIndex] * arrayElemSize;
memcpy(destPtr, srcPtr, sizeBytes);
}
}
else
{
dest.values.cpuValid = false;
}
if (src.values.gpuValid)
{
MirroredArray *const destGpuElemCountArray = &dest.gpuElemCounts;
MirroredArray *const destGpuPtrArray = &dest.gpuPtrs;
enableGpuWrite(dest.values, elemCounts, destCpuCapacityArray, destGpuElemCountArray, destGpuPtrArray);
const MirroredArray *const srcGpuPtrArray = &src.gpuPtrs;
// Select which API to use
omni::gpucompute::GpuCompute* computeAPI = nullptr;
omni::gpucompute::Context* computeCtx = nullptr;
if (src.values.gpuAllocedWithCuda)
{
computeAPI = platform.gpuCuda;
computeCtx = platform.gpuCudaCtx;
}
if (computeAPI)
{
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destGpuPtrArray->cpuData());
uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(srcGpuPtrArray->cpuData());
for (size_t elemIndex = 0; elemIndex != destSpan.array.elementCount; elemIndex++)
{
uint8_t* destPtr = destPtrs[elemIndex];
const uint8_t* srcPtr = srcPtrs[elemIndex];
size_t sizeBytes = elemCounts[elemIndex] * arrayElemSize;
computeAPI->memcpyAsync(*computeCtx, destPtr, srcPtr, sizeBytes, omni::gpucompute::MemcpyKind::deviceToDevice);
}
destGpuPtrArray->gpuAllocedWithCuda = src.values.gpuAllocedWithCuda;
}
}
}
inline void PathToAttributesMap::bucketImplCopyArrays(BucketImpl& destBucketImpl,
BucketId destBucketId,
const BucketImpl& srcBucketImpl,
BucketId srcBucketId,
const carb::flatcache::set<AttrNameAndType_v2>& attrFilter)
{
destBucketImpl.scalarAttributeArrays.forEach([this, &srcBucketImpl, &attrFilter](const AttrName& destName, ScalarAttributeArray& dest) {
AttrNameAndType_v2 destNameV2(
carb::flatcache::Type(dest.type), destName.name, destName.suffix);
const bool attrIsInFilter = attrFilter.size() == 0 || (attrFilter.find(destNameV2) != attrFilter.end());
if (attrIsInFilter && destName.suffix == NameSuffix::none)
{
const ScalarAttributeArray* src;
VALIDATE_TRUE(srcBucketImpl.scalarAttributeArrays.find(destName, &src));
CARB_ASSERT(src);
bucketImplCopyScalarAttributeArray(dest, *src);
}
});
destBucketImpl.arrayAttributeArrays.forEach([this, &destBucketImpl, &srcBucketImpl, &attrFilter](const AttrName& destName, ArrayAttributeArray& dest) {
AttrNameAndType_v2 destNameV2(
carb::flatcache::Type(dest.values.type), destName.name, destName.suffix);
const bool attrIsInFilter = attrFilter.size() == 0 || (attrFilter.find(destNameV2) != attrFilter.end());
if (attrIsInFilter && destName.suffix == NameSuffix::none)
{
const ArrayAttributeArray* src;
VALIDATE_TRUE(srcBucketImpl.arrayAttributeArrays.find(destName, &src));
CARB_ASSERT(src);
bucketImplCopyArrayAttributeArray(destBucketImpl, destName, dest, *src);
}
});
}
template<typename CallbackT>
void inline PathToAttributesMap::BucketImpl::forEachValueArray(CallbackT callback)
{
scalarAttributeArrays.forEach([&callback](const AttrName& name, ScalarAttributeArray& array) {
callback(name, array);
});
arrayAttributeArrays.forEach([&callback](const AttrName& name, ArrayAttributeArray& array) {
callback(name, array.values);
static_assert(sizeof(PathToAttributesMap::ArrayAttributeArray) == 5 * sizeof(PathToAttributesMap::MirroredArray), "ArrayAttributeArray has unexpected size");
// Intentionally skips these
// callback(name, array.elemCounts);
// callback(name, array.cpuElemCounts);
// callback(name, array.gpuElemCounts);
// callback(name, array.gpuPtrs);
});
}
inline void PathToAttributesMap::Serializer::init(uint8_t *const _buf, uint8_t *const _end)
{
p = buf = _buf;
end = _end;
bytesWritten = 0;
overflowed = false;
}
inline bool PathToAttributesMap::Serializer::writeBytes(const uint8_t *const src, uint64_t size)
{
CARB_ASSERT(src);
bytesWritten += size;
if (p != nullptr && p + size <= end)
{
memcpy(p, src, size);
p += size;
return true;
}
overflowed = true;
return false;
}
inline bool PathToAttributesMap::Serializer::writeString(const char* const s, const size_t len)
{
bool OK = true;
if (!write<size_t>(len))
{
OK = false;
}
if (!writeBytes(reinterpret_cast<const uint8_t*>(s), len))
{
OK = false;
}
return OK;
}
inline bool PathToAttributesMap::Serializer::writeString(const std::string &s)
{
bool OK = true;
if (!write<size_t>(s.length()))
{
OK = false;
}
if (!writeBytes(reinterpret_cast<const uint8_t*>(s.data()), s.length()))
{
OK = false;
}
return OK;
}
template<typename T>
bool PathToAttributesMap::Serializer::write(const T &t)
{
static_assert(std::is_pod<T>::value, "T must be POD");
return writeBytes(reinterpret_cast<const uint8_t*>(&t), sizeof(T));
}
inline void PathToAttributesMap::Deserializer::init(const uint8_t *const _buf, const uint8_t *const _end)
{
p = buf = _buf;
end = _end;
bytesRead = 0;
overflowed = false;
}
inline bool PathToAttributesMap::Deserializer::readBytes(uint8_t *const dst, uint64_t size)
{
CARB_ASSERT(dst);
bytesRead += size;
if (p + size <= end)
{
memcpy(dst, p, size);
p += size;
return true;
}
overflowed = true;
return false;
}
inline bool PathToAttributesMap::Deserializer::readString(std::string &s)
{
size_t len = 0;
if (!read<size_t>(len))
{
return false;
}
s.resize(len);
return readBytes(reinterpret_cast<uint8_t*>(&s[0]), len);
}
template<typename T>
bool PathToAttributesMap::Deserializer::read(T &t)
{
static_assert(std::is_pod<T>::value, "T must be POD");
return readBytes(reinterpret_cast<uint8_t*>(&t), sizeof(T));
}
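// Illustrative round trip through Serializer/Deserializer (a sketch; the
// two-pass sizing relies on writeBytes counting bytesWritten even when the
// destination range is null or too small):
//
//     Serializer out;
//     out.init(nullptr, nullptr);            // measuring pass
//     out.write<uint32_t>(42u);
//     std::vector<uint8_t> buf(out.bytesWritten);
//     out.init(buf.data(), buf.data() + buf.size());
//     out.write<uint32_t>(42u);              // writing pass
//
//     Deserializer in;
//     in.init(buf.data(), buf.data() + buf.size());
//     uint32_t v = 0;
//     in.read<uint32_t>(v);                  // v == 42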
inline void PathToAttributesMap::serializeMirroredArrayMetadata(const AttrName& srcName, MirroredArray &srcValuesArray, Serializer &out)
{
out.writeString(toTfToken(srcName.name).GetString());
out.write<NameSuffix>(srcName.suffix);
// TfToken are actually pointers, so we need to serialize the encoded TypeC
out.write<TypeC>(srcValuesArray.type);
out.write<bool>(srcValuesArray.cpuValid);
out.write<bool>(srcValuesArray.usdValid);
out.write<size_t>(srcValuesArray.count);
}
pxr::TfType typeCtoTfType(TypeC typeC);
template<typename ArraysT, typename ArraysMapT>
inline void PathToAttributesMap::deserializeMirroredArrayMetadata(Platform& platform, ArraysMapT& arraysMap, AttrName &destName, Typeinfo *&typeInfo, ArraysT *&destArray, Deserializer &in)
{
std::string nameStr;
in.readString(nameStr);
in.read<NameSuffix>(destName.suffix);
destName.name = asInt(pxr::TfToken(nameStr));
TypeC destType;
{
in.read<TypeC>(destType);
}
// typeToInfo is deserialized before all mirrored arrays, so the type must exist.
VALIDATE_TRUE(typeToInfo.find(destType, &typeInfo));
if (!arraysMap.allocateEntry(destName, &destArray))
{
CARB_LOG_ERROR("Failed to insert dest mirrored array");
return;
}
CARB_ASSERT(destArray);
new (destArray) ArraysT(platform, destType, *typeInfo);
MirroredArray *const destValuesArray = destArray->getValuesArray();
destValuesArray->type = destType;
in.read<bool>(destValuesArray->cpuValid);
in.read<bool>(destValuesArray->usdValid);
in.read<size_t>(destValuesArray->count);
destValuesArray->resize(destValuesArray->count * typeInfo->size);
}
inline uint64_t PathToAttributesMap::serializeScalarAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out)
{
const size_t bytesBegin = out.bytesWritten;
MirroredArray &srcValuesArray = srcScalarAttributeArray;
serializeMirroredArrayMetadata(srcName, srcValuesArray, out);
if (srcValuesArray.cpuValid)
{
const ConstSpanC srcSpan = getArraySpanC(srcValuesArray, srcName, ScalarArrayOfArrayInfo(), srcBucketImpl, CpuReadConfig()).array;
CARB_ASSERT(srcSpan.elementCount == srcValuesArray.count);
const uint8_t* srcPtr = reinterpret_cast<const uint8_t*>(srcSpan.ptr);
const Typeinfo &typeInfo = srcValuesArray.typeinfo;
CARB_ASSERT(!typeInfo.isArray);
out.writeBytes(srcPtr, srcSpan.elementCount * typeInfo.size);
}
return out.bytesWritten - bytesBegin;
}
inline bool PathToAttributesMap::deserializeScalarAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in)
{
AttrName destName;
Typeinfo *typeInfo;
ScalarAttributeArray *destArray;
deserializeMirroredArrayMetadata(destBucketImpl.platform, destBucketImpl.scalarAttributeArrays, destName, typeInfo, destArray, in);
CARB_ASSERT(typeInfo);
CARB_ASSERT(destArray);
CARB_ASSERT(!typeInfo->isArray);
if (destArray->cpuValid)
{
uint8_t* destPtr = reinterpret_cast<uint8_t*>(destArray->cpuData());
in.readBytes(destPtr, destArray->count * typeInfo->size);
}
return true;
}
inline uint64_t PathToAttributesMap::serializeArrayAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ArrayAttributeArray& srcArrayAttributeArray, Serializer &out)
{
const size_t bytesBegin = out.bytesWritten;
MirroredArray &srcValuesArray = srcArrayAttributeArray.values;
serializeMirroredArrayMetadata(srcName, srcValuesArray, out);
// write scalar metadata
auto writeScalarArrayOfArrayMetadata = [this](BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out) {
// similar to serializeScalarAttributeArray, but we can skip some metadata because it should be inferrable
out.write<bool>(srcScalarAttributeArray.cpuValid);
out.write<bool>(srcScalarAttributeArray.usdValid);
out.write<size_t>(srcScalarAttributeArray.count);
if (srcScalarAttributeArray.cpuValid)
{
const ConstSpanC srcSpan = getArraySpanC(srcScalarAttributeArray, srcName, ScalarArrayOfArrayInfo(), srcBucketImpl, CpuReadConfig()).array;
CARB_ASSERT(srcSpan.elementCount == srcScalarAttributeArray.count);
const uint8_t* srcPtr = reinterpret_cast<const uint8_t*>(srcSpan.ptr);
const Typeinfo &typeInfo = srcScalarAttributeArray.typeinfo;
CARB_ASSERT(!typeInfo.isArray);
out.writeBytes(srcPtr, srcSpan.elementCount * typeInfo.size);
}
};
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.elemCounts, out);
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.cpuElemCounts, out);
// TODO: Can we omit these?
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.gpuElemCounts, out);
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.gpuPtrs, out);
// write array-of-array values
if (srcValuesArray.cpuValid)
{
const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(srcArrayAttributeArray);
const ConstSpanC srcSpan = getArraySpanC(srcValuesArray, srcName, destAOA, srcBucketImpl, CpuReadConfig()).array;
CARB_ASSERT(srcSpan.elementCount == srcValuesArray.count);
// TODO: Should this be cpuElemCounts instead of elemCounts? The requested capacity may not have been applied yet..
const size_t* elemCounts = reinterpret_cast<const size_t*>(srcArrayAttributeArray.elemCounts.cpuData());
const Typeinfo &typeInfo = srcValuesArray.typeinfo;
CARB_ASSERT(typeInfo.isArray);
uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(srcSpan.ptr);
for (size_t elemIndex = 0; elemIndex != srcSpan.elementCount; elemIndex++)
{
const uint8_t* srcPtr = srcPtrs[elemIndex];
const size_t elemCount = elemCounts[elemIndex];
out.writeBytes(srcPtr, elemCount * typeInfo.arrayElemSize);
}
}
return out.bytesWritten - bytesBegin;
}
inline bool PathToAttributesMap::deserializeArrayAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in)
{
AttrName destName;
Typeinfo *typeInfo;
ArrayAttributeArray *destArray;
deserializeMirroredArrayMetadata(destBucketImpl.platform, destBucketImpl.arrayAttributeArrays, destName, typeInfo, destArray, in);
CARB_ASSERT(typeInfo);
CARB_ASSERT(destArray);
CARB_ASSERT(typeInfo->isArray);
// write scalar metadata
auto readScalarArrayOfArrayMetadata = [this](BucketImpl& destBucketImpl, const BucketId& destBucketId, const AttrName& destName, ScalarAttributeArray& destScalarAttributeArray, Deserializer &in) {
// similar to deserializeScalarAttributeArray, but we can skip some metadata because it should be inferrable
in.read<bool>(destScalarAttributeArray.cpuValid);
in.read<bool>(destScalarAttributeArray.usdValid);
in.read<size_t>(destScalarAttributeArray.count);
const Typeinfo &typeInfo = destScalarAttributeArray.typeinfo;
destScalarAttributeArray.resize(typeInfo.size * destScalarAttributeArray.count);
if (destScalarAttributeArray.cpuValid)
{
CARB_ASSERT(destScalarAttributeArray.size() == (getTypeInfo(destScalarAttributeArray.type).size * destScalarAttributeArray.count));
uint8_t *const destPtr = reinterpret_cast<uint8_t*>(destScalarAttributeArray.cpuData());
CARB_ASSERT(!typeInfo.isArray);
in.readBytes(destPtr, destScalarAttributeArray.count * typeInfo.size);
}
};
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->elemCounts, in);
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->cpuElemCounts, in);
// TODO: Can we omit these?
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->gpuElemCounts, in);
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->gpuPtrs, in);
// read array-of-array values
MirroredArray& destValuesArray = destArray->values;
if (destValuesArray.cpuValid)
{
// Need to set capacity to zero, because capacity will
// have been erroneously copied from source
const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(*destArray);
const SpanC destCapacitySpan = getArraySpanC(destArray->cpuElemCounts, destName, destAOA, destBucketImpl, CpuReadWriteConfig()).array;
size_t *const destCapacities = reinterpret_cast<size_t*>(destCapacitySpan.ptr);
for (size_t elemIndex = 0; elemIndex != destCapacitySpan.elementCount; elemIndex++)
{
destCapacities[elemIndex] = 0;
}
const size_t* elemCounts = reinterpret_cast<const size_t*>(destArray->elemCounts.cpuData());
// getArrayWrC to allocate data for arrays
const SpanC destSpan = getArraySpanC(destValuesArray, destName, destAOA, destBucketImpl, CpuReadWriteConfig()).array;
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destSpan.ptr);
for (size_t elemIndex = 0; elemIndex != destValuesArray.count; elemIndex++)
{
uint8_t* destPtr = destPtrs[elemIndex];
size_t elemCount = elemCounts[elemIndex];
in.readBytes(destPtr, elemCount * typeInfo->arrayElemSize);
}
destValuesArray.cpuValid = true;
}
return true;
}
inline BucketImpl& PathToAttributesMap::addAttributeInternal(BucketImpl& prevBucketImpl, const Bucket& prevBucket, const TokenC& attrName, const TypeC type, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount)
{
APILOGGER("addAttributeInternal", apiLogEnabled, attrName);
// newBucket := oldBucket Union { attrName }
// findOrCreate (bucketId, bucketImpl) for newBucket, which updates
// attrNameSetToBucketId and buckets
Bucket nextBucket = prevBucket;
nextBucket.insert({ carb::flatcache::Type(type), attrName, NameSuffix::none });
// Early out if attribute already in bucket
const bool attributeAlreadyInBucket = (nextBucket.size() == prevBucket.size());
if (attributeAlreadyInBucket)
return prevBucketImpl;
const std::pair<BucketId, BucketImpl&> nextBucketIdAndImpl = findOrCreateBucket(nextBucket);
const BucketId nextBucketId = nextBucketIdAndImpl.first;
BucketImpl& nextBucketImpl = nextBucketIdAndImpl.second;
const size_t nextBucketOriginalSize = nextBucketImpl.elemToPath.size();
if (nextBucketOriginalSize == 0)
{
// Move arrays etc. from original bucket
nextBucketImpl = std::move(prevBucketImpl);
nextBucketImpl.SetBucket(std::move(nextBucket));
// Below are codified assumptions about the side-effects of attempted move-assigning of a BucketImpl.
// We assume that move-assigning prevBucketImpl like above will clear it as well. This is important because
// prevBucketImpl may still reside as a valid bucket in the PathToAttributesMap::buckets map.
//
// These asserts live outside of the move-assignment operator definition because, technically, the compiler is
// allowed to elect to use a copy-assignment if it needs to.
//
// TODO: Would this be better expressed as an explicit "clear" of prevBucketImpl? Why wasn't that the original
// behavior?
CARB_ASSERT(prevBucketImpl.scalarAttributeArrays.empty());
CARB_ASSERT(prevBucketImpl.arrayAttributeArrays.empty());
CARB_ASSERT(prevBucketImpl.elemToPath.empty());
CARB_ASSERT(prevBucketImpl.listenerIdToChanges.empty());
}
else
{
// TODO : there should be a faster way to do this but more to discuss here later
auto prevBucketMapIter = attrNameSetToBucketId.find(prevBucket);
const BucketId prevBucketId = (prevBucketMapIter != attrNameSetToBucketId.end()) ? prevBucketMapIter->second : kInvalidBucketId;
for (const auto path : prevBucketImpl.elemToPath)
{
moveElementBetweenBuckets(asInt(path), nextBucketId, prevBucketId, nextBucket);
}
}
const size_t nextBucketNewSize = nextBucketImpl.elemToPath.size();
CARB_ASSERT(nextBucketNewSize >= nextBucketOriginalSize);
CARB_ASSERT(getTypeInfo(type).size == typeinfo.size);
CARB_ASSERT(getTypeInfo(type).isArray == typeinfo.isArray);
CARB_ASSERT(getTypeInfo(type).arrayElemSize == typeinfo.arrayElemSize);
// Add an array for the new attribute
const AttrName name{ attrName, NameSuffix::none };
ArrayAttributeArray *arrayAttributeArray;
MirroredArray* valuesArray;
if (typeinfo.isArray)
{
const bool inserted = nextBucketImpl.arrayAttributeArrays.allocateEntry(std::move(name), &arrayAttributeArray);
valuesArray = &arrayAttributeArray->values;
if (inserted)
{
new (arrayAttributeArray) ArrayAttributeArray(nextBucketImpl.platform, type, typeinfo);
while (valuesArray->count < nextBucketNewSize)
{
allocElement(*arrayAttributeArray);
}
}
}
else
{
const bool inserted = nextBucketImpl.scalarAttributeArrays.allocateEntry(std::move(name), &valuesArray);
arrayAttributeArray = nullptr;
if (inserted)
{
new (valuesArray) ScalarAttributeArray(nextBucketImpl.platform, type, typeinfo);
while (valuesArray->count < nextBucketNewSize)
{
allocElement(*valuesArray);
}
}
}
CARB_ASSERT(valuesArray);
CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray);
CARB_ASSERT(!typeinfo.isArray || getTypeInfo(valuesArray->type).isArray);
#if CARB_ASSERT_ENABLED
const size_t elemCount = getElementCount(nextBucketImpl.GetBucket());
CARB_ASSERT(valuesArray->count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->cpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->elemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuPtrs.count == elemCount);
#endif // #if CARB_ASSERT_ENABLED
// fixup elem/path maps
for (pxr::SdfPath& path : nextBucketImpl.elemToPath)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
if (pathToBucketElem.find(asInt(path), &bucketAndElemIndex))
{
bucketAndElemIndex->first = nextBucketId;
}
}
// If default value specified, copy it to every element
if (value)
{
fillAttributeInternal(nextBucketImpl, name, nextBucketOriginalSize, nextBucketNewSize, value, typeinfo, arrayElemCount, valuesArray, arrayAttributeArray);
}
return nextBucketImpl;
}
inline void PathToAttributesMap::fillAttributeInternal(BucketImpl& bucketImpl, const AttrName& name, const size_t startIndex, const size_t endIndex, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount, MirroredArray *const valuesArray, ArrayAttributeArray *const arrayAttributeArray)
{
CARB_ASSERT(valuesArray);
CARB_ASSERT(value);
CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray);
CARB_ASSERT(startIndex < valuesArray->count);
CARB_ASSERT(endIndex <= valuesArray->count);
if (typeinfo.isArray)
{
CARB_ASSERT(arrayAttributeArray);
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
// Fill array sizes
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, name, aoa, bucketImpl, CpuWriteConfig());
CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount);
CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount);
for (size_t i = startIndex; i < endIndex; ++i)
{
reinterpret_cast<size_t*>(arrayAndDirtyIndices.array.ptr)[i] = arrayElemCount;
setArrayElementDirty(arrayAndDirtyIndices, i);
}
}
// Fill array values
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*valuesArray, name, aoa, bucketImpl, CpuWriteConfig());
CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount);
CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount);
for (size_t i = startIndex; i < endIndex; ++i)
{
uint8_t** dest = reinterpret_cast<uint8_t**>(arrayAndDirtyIndices.array.ptr) + arrayAndDirtyIndices.array.elementSize * i;
CARB_ASSERT(*dest);
memcpy(*dest, value, arrayElemCount * typeinfo.arrayElemSize); // assumes coherent and packed array value provided
setArrayElementDirty(arrayAndDirtyIndices, i);
}
}
}
else
{
const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo();
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*valuesArray, name, aoa, bucketImpl, CpuWriteConfig());
CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount);
CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount);
for (size_t i = startIndex; i < endIndex; ++i)
{
uint8_t* dest = arrayAndDirtyIndices.array.ptr + arrayAndDirtyIndices.array.elementSize * i;
memcpy(dest, value, typeinfo.size);
setArrayElementDirty(arrayAndDirtyIndices, i);
}
}
}
inline void PathToAttributesMap::addAttributeInternal(const PathC& path, const TokenC& attrNameC, const NameSuffix nameSuffix, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount)
{
APILOGGER("addAttributeInternal", apiLogEnabled, path, attrNameC);
BucketId bucketId;
ArrayIndex elemIndex;
std::tie(bucketId, elemIndex) = addAttributeGetBucketAndArrayIndex(path, attrNameC, nameSuffix, ctype);
BucketImpl *const bucketImpl = buckets.find(bucketId);
CARB_ASSERT(bucketImpl);
ArrayAttributeArray *arrayAttributeArray;
MirroredArray* valuesArray;
const AttrName attrName{ attrNameC, nameSuffix };
if (typeinfo.isArray)
{
bucketImpl->arrayAttributeArrays.find(attrName, &arrayAttributeArray);
CARB_ASSERT(arrayAttributeArray);
valuesArray = &arrayAttributeArray->values;
}
else
{
bucketImpl->scalarAttributeArrays.find(attrName, &valuesArray);
arrayAttributeArray = nullptr;
}
CARB_ASSERT(valuesArray);
CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray);
if (value)
{
fillAttributeInternal(*bucketImpl, attrName, elemIndex, elemIndex + 1, value, typeinfo, arrayElemCount, valuesArray, arrayAttributeArray);
}
#if CARB_ASSERT_ENABLED
const size_t elemCount = bucketImpl->elemToPath.size();
CARB_ASSERT(valuesArray->count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->cpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->elemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuPtrs.count == elemCount);
#endif // #if CARB_ASSERT_ENABLED
}
inline PathToAttributesMap::PathToAttributesMap(const PlatformId& platformId)
: platform(carb::getCachedInterface<carb::flatcache::IPlatform>()->getMutable(platformId))
, pathToBucketElem(0, std::hash<PathId>(), std::equal_to<PathId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, buckets(platform)
, attrNameSetToBucketId()
, listenerIdToChangeTrackerConfig(0, ListenerIdHasher(), std::equal_to<ListenerId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, typeToInfo(0, std::hash<TypeC>(), std::equal_to<TypeC>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, usdStageId()
, minimalPopulationDone(false)
{
// required types for arrays of arrays
Typeinfo* typeinfo;
typeToInfo.allocateEntry(PTAM_SIZE_TYPEC, &typeinfo);
*typeinfo = Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 };
typeToInfo.allocateEntry(PTAM_POINTER_TYPEC, &typeinfo);
*typeinfo = Typeinfo{ sizeof(PTAM_POINTER_TYPE), false, 0 };
}
inline PathToAttributesMap& PathToAttributesMap::operator=(const flatcache::PathToAttributesMap& other)
{
carb::profiler::ZoneId zoneId = CARB_PROFILE_BEGIN(1, "Clear buckets");
buckets.clear();
CARB_PROFILE_END(1, zoneId);
zoneId = CARB_PROFILE_BEGIN(1, "Copy pathToBucketElem");
pathToBucketElem.clear();
pathToBucketElem.reserve(other.pathToBucketElem.size());
other.pathToBucketElem.forEach([this](const PathId& key, const std::pair<BucketId, ArrayIndex> &otherValue) {
std::pair<BucketId, ArrayIndex>* value;
VALIDATE_TRUE(pathToBucketElem.allocateEntry(key, &value));
static_assert(std::is_copy_constructible<std::pair<BucketId, ArrayIndex>>::value, "Expected pathToBucketElem values to be copy-constructible");
new (value) std::pair<BucketId, ArrayIndex>(otherValue);
});
CARB_PROFILE_END(1, zoneId);
zoneId = CARB_PROFILE_BEGIN(1, "Copy scalar attributes");
buckets = other.buckets;
CARB_PROFILE_END(1, zoneId);
attrNameSetToBucketId = other.attrNameSetToBucketId;
typeToInfo = other.typeToInfo;
usdStageId = other.usdStageId;
minimalPopulationDone = other.minimalPopulationDone;
stageHierarchy = other.stageHierarchy;
zoneId = CARB_PROFILE_BEGIN(1, "Copy array attributes");
{
BucketId id{ 0 };
for (size_t i = 0; i < buckets.end(); ++i, ++id)
{
auto bucketPtr = buckets.find(id);
if (bucketPtr)
{
const BucketImpl& srcBucketImpl = *(other.buckets.find(id));
BucketImpl& destBucketImpl = *bucketPtr;
// Copy any array-valued attributes
bucketImplCopyArrays(destBucketImpl, id, srcBucketImpl, id);
}
}
}
CARB_PROFILE_END(1, zoneId);
return *this;
}
inline PathToAttributesMap::~PathToAttributesMap()
{
}
}
}
#include <carb/flatcache/GetArrayGPU.h>
// Enable the warnings we disabled when we included USD headers
#if defined(__GNUC__)
# pragma GCC diagnostic pop
# ifdef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
# define __DEPRECATED
# undef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
# endif
#endif
| 267,444 |
C
| 38.574578 | 369 | 0.645073 |
omniverse-code/kit/fabric/include/carb/flatcache/HashMap.h
|
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <cstdint>
#include <cstdlib>
#include <carb/Defines.h>
#include <carb/flatcache/Defines.h>
#include <carb/flatcache/Intrinsics.h>
namespace carb {
namespace flatcache {
struct HashMapDefaultAlloc
{
inline void* operator()(const size_t bytes)
{
return std::malloc(bytes);
}
};
struct HashMapDefaultFree
{
inline void operator()(void *const ptr)
{
std::free(ptr);
}
};
// A hashmap implemented with the following decisions:
//
// * Memory is allocated in a single contiguous buffer so that find operations make linear cache line fetches as much
//   as possible. The intent is to make memory access patterns more easily predictable, and thus hardware-level
//   prefetch decisions easier. Similarly, whole-map iteration benefits from the same cache-friendly access patterns.
//
// * Find operations intentionally are coded without range checks on the main loop. This is a tradeoff of speed for
// less error-detection in release builds. To help mitigate this, debug builds do track probe counts to validate
// we don't exceed the possible length of the hashmap.
//
// * No opinion codified on thread synchronization. It can be used safely if done carefully, but this is not a
// guarantee of the implementation.
//
// * No iterators provided. If some batch operation must occur, use the forEach() function provided, which should
// suffice. The forEach() method should provide similar performance without the added risk of callers being able to
// arbitrarily cache iterators outside the control of the HashMap, its routines, or even its lifetime.
//
// * Deletes copy-constructor and copy-assignment for non-standard-layout mappings. This forces callers to implement
//   these routines explicitly, favoring the clarity of reading intent in an explicit implementation over the
//   ambiguity of compiler-selected behavior. Standard-layout mappings default to using memcpy to copy data as fast
//   as possible.
//
// * Implements an allocateEntry() method, rather than insert()/emplace() methods mimicking the std::unordered_map
//   API. This does the minimum work necessary to reserve address space for a key-value mapping, and provides the
//   caller with the reserved buffer memory address for them to do their own construction, assignment, or
//   initialization routines. This favors slightly more explicit coding patterns at the caller to force clarity of
//   intent. In particular, it makes more obvious the caller's choice between construction vs assignment, and copy vs
//   move semantics. It also offers greater flexibility without sacrificing performance (see the usage sketch below).
//
// * ~HashMap() and clear() operate differently depending on whether KeyT and ValueT are known to be
//   std::is_trivially_destructible. If they are, the fastest option is chosen: deallocate the memory without
//   iterating or explicitly destructing per-entry. Otherwise, the implementation iterates and destructs each
//   object in place.
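//
// Illustrative usage sketch (not part of the shipping header; the key/value
// types and names below are placeholders chosen for the example):
//
//   HashMap<uint64_t, std::string> map;
//
//   // allocateEntry() reserves a slot; the caller constructs in-place on first
//   // insertion, or assigns when the key already existed.
//   std::string* value;
//   if (map.allocateEntry(42ull, &value))
//       new (value) std::string("first insert"); // placement-new construction
//   else
//       *value = "key already present"; // plain assignment
//
//   // find() reports absence instead of inserting, unlike std::unordered_map's
//   // operator[].
//   std::string* found;
//   if (map.find(42ull, &found))
//       printf("%s\n", found->c_str());
//
//   // Batch iteration goes through forEach(); freeEntry() is safe to call from
//   // inside the callback.
//   map.forEach([](const uint64_t&, std::string& v) { v += "!"; });
//   map.freeEntry(42ull);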
template<typename KeyT,
typename ValueT,
typename HashT = std::hash<KeyT>,
typename KeyEqualsT = std::equal_to<KeyT>,
typename AllocT = HashMapDefaultAlloc,
typename FreeT = HashMapDefaultFree>
struct HashMap
{
// These load-factor constants were not tuned exhaustively and could probably be tuned better
static constexpr size_t LOAD_FACTOR_NUMERATOR = 3ull;
static constexpr size_t LOAD_FACTOR_DENOMINATOR = 4ull;
static constexpr size_t MIN_INIT_CAPACITY = 4ull;
static_assert((MIN_INIT_CAPACITY & (MIN_INIT_CAPACITY - 1ull)) == 0, "MIN_INIT_CAPACITY must be a power of two!");
static constexpr bool KEY_IS_TRIVIALLY_DESTRUCTIBLE = std::is_trivially_destructible<KeyT>::value;
static constexpr bool VALUE_IS_TRIVIALLY_DESTRUCTIBLE = std::is_trivially_destructible<ValueT>::value;
enum EntryState : uint8_t
{
HASH_MAP_ENTRY_STATE_FREE,
HASH_MAP_ENTRY_STATE_OCCUPIED,
HASH_MAP_ENTRY_STATE_DELETED,
};
struct EntryT
{
EntryState state;
KeyT key;
ValueT value;
};
static constexpr size_t allocationSize( const size_t capacity );
static constexpr size_t loadThreshold( const size_t capacity );
static constexpr size_t inverseLoadThreshold( const size_t capacity );
static constexpr size_t capacityAdjustedForLoadThreshold( const size_t capacity );
HashMap( const size_t capacity = 0,
const HashT &hasher = HashT(),
const KeyEqualsT &keyEquals = KeyEqualsT(),
const AllocT &alloc_ = AllocT(),
const FreeT &free_ = FreeT() );
~HashMap();
HashMap(const HashMap& other);
HashMap& operator=(const HashMap& other);
HashMap(HashMap&& other) noexcept;
HashMap& operator=(HashMap&& other) noexcept;
inline friend void swap(HashMap& a, HashMap& b) noexcept
{
using std::swap;
swap(a.m_hasher, b.m_hasher);
swap(a.m_keyEquals, b.m_keyEquals);
swap(a.m_alloc, b.m_alloc);
swap(a.m_free, b.m_free);
swap(a.m_size, b.m_size);
swap(a.m_capacity, b.m_capacity);
swap(a.m_loadThreshold, b.m_loadThreshold);
swap(a.m_mask, b.m_mask);
swap(a.m_entries, b.m_entries);
}
void clear();
const void* data() const;
bool empty() const;
size_t size() const;
size_t capacity() const;
void reserve(const size_t capacity);
bool find( const KeyT& key, ValueT** outValue );
bool find( const KeyT& key, const ValueT** outValue ) const;
bool exists( const KeyT& key ) const;
bool allocateEntry( KeyT&& key, ValueT** outValue );
bool allocateEntry( const KeyT& key, ValueT** outValue );
// Intended to be safe to call during forEach() as it does not invalidate iteration.
bool freeEntry( const KeyT& key );
void freeEntryByKeyAddress( const KeyT *const key );
void freeEntryByValueAddress( const ValueT *const value );
template<typename CallbackT>
inline void forEach( CallbackT callback );
template<typename CallbackT>
inline void forEach( CallbackT callback ) const;
size_t totalCollisionLength() const;
private:
size_t hashInternal( const KeyT& key ) const;
void resizeIfNecessary();
void resize( const size_t nextCapacity );
void freeEntryInternal( EntryT *const entry );
bool findFirstAvailable( const KeyT& key, EntryT** outEntry );
bool findExisting( const KeyT& key, EntryT** outEntry );
bool findExisting( const KeyT& key, const EntryT** outEntry ) const;
HashT m_hasher;
KeyEqualsT m_keyEquals;
AllocT m_alloc;
FreeT m_free;
size_t m_size;
size_t m_capacity;
size_t m_loadThreshold;
size_t m_mask;
EntryT* m_entries;
};
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocationSize( const size_t capacity )
{
return capacity * sizeof( EntryT );
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::loadThreshold( const size_t capacity )
{
return (capacity * LOAD_FACTOR_NUMERATOR / LOAD_FACTOR_DENOMINATOR);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::inverseLoadThreshold( const size_t capacity )
{
return (capacity * LOAD_FACTOR_DENOMINATOR / LOAD_FACTOR_NUMERATOR);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::capacityAdjustedForLoadThreshold( const size_t capacity )
{
// reserves capacity to the nearest power of two that satisfies the load threshold for the requested capacity
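// Worked examples (illustrative): a requested capacity of 10 yields adjustedCapacity == 16, since
// loadThreshold(16) == 12 > 10; a requested capacity of 12 yields 32, because loadThreshold(16) == 12
// is not strictly greater than 12.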
size_t adjustedCapacity;
if (capacity && capacity >= loadThreshold(MIN_INIT_CAPACITY))
{
// +1 because we want capacity < loadThreshold(adjustedCapacity), not capacity <= loadThreshold(adjustedCapacity)
adjustedCapacity = 1ull << ( 64u - clz64( inverseLoadThreshold( capacity + 1 ) - 1ull ) );
}
else
{
adjustedCapacity = MIN_INIT_CAPACITY;
}
CARB_ASSERT(capacity < loadThreshold(adjustedCapacity));
CARB_ASSERT((adjustedCapacity & (adjustedCapacity - 1ull)) == 0);
return adjustedCapacity;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(const size_t capacity,
const HashT &hasher,
const KeyEqualsT &keyEquals,
const AllocT &alloc_,
const FreeT &free_)
{
m_hasher = hasher;
m_keyEquals = keyEquals;
m_alloc = alloc_;
m_free = free_;
m_size = 0;
if (capacity)
{
const size_t adjustedCapacity = capacityAdjustedForLoadThreshold(capacity);
const size_t bufSize = allocationSize(adjustedCapacity);
m_capacity = adjustedCapacity;
m_loadThreshold = loadThreshold(adjustedCapacity);
m_mask = adjustedCapacity - 1ull;
m_entries = (EntryT*)m_alloc(bufSize);
memset(m_entries, 0, bufSize);
}
else
{
m_capacity = 0;
m_loadThreshold = 0;
m_mask = 0;
m_entries = nullptr;
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::~HashMap()
{
if ( m_entries )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE || !VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
size_t index = 0;
size_t visited = 0;
for ( ; index < m_capacity && visited < m_size; ++index)
{
EntryT *const entry = &m_entries[index];
if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->key.~KeyT();
}
if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->value.~ValueT();
}
CARB_ASSERT(visited < m_size);
++visited;
}
}
}
m_free(m_entries);
m_entries = nullptr;
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(const HashMap& other)
: m_hasher(other.m_hasher)
, m_keyEquals(other.m_keyEquals)
, m_alloc(other.m_alloc)
, m_free(other.m_free)
, m_size(other.m_size)
, m_capacity(other.m_capacity)
, m_loadThreshold(other.m_loadThreshold)
, m_mask(other.m_mask)
{
static_assert(std::is_trivially_copyable<EntryT>::value, "Copying of HashMap is only supported for key-value mappings that use standard-layout classes.");
const size_t bufSize = allocationSize(m_capacity);
m_entries = (EntryT*)m_alloc(bufSize);
memcpy(m_entries, other.m_entries, bufSize);
CARB_ASSERT(m_entries);
CARB_ASSERT(m_capacity);
CARB_ASSERT((m_capacity & (m_capacity - 1ull)) == 0); // assert m_capacity is power of two
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size < m_loadThreshold);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>& HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::operator=(const HashMap& other)
{
HashMap tmp(other);
swap(*this, tmp);
return *this;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(HashMap&& other) noexcept
: m_hasher(std::move(other.m_hasher))
, m_keyEquals(std::move(other.m_keyEquals))
, m_alloc(std::move(other.m_alloc))
, m_free(std::move(other.m_free))
, m_size(std::move(other.m_size))
, m_capacity(std::move(other.m_capacity))
, m_loadThreshold(std::move(other.m_loadThreshold))
, m_mask(std::move(other.m_mask))
, m_entries(std::move(other.m_entries))
{
other.m_entries = nullptr;
other.clear();
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>& HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::operator=(HashMap&& other) noexcept
{
HashMap tmp(std::move(other));
swap(*this, tmp);
return *this;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::clear()
{
if ( m_entries )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE || !VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
size_t index = 0;
size_t visited = 0;
for ( ; index < m_capacity && visited < m_size; ++index)
{
EntryT *const entry = &m_entries[index];
if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->key.~KeyT();
}
if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->value.~ValueT();
}
CARB_ASSERT(visited < m_size);
++visited;
}
entry->state = HASH_MAP_ENTRY_STATE_FREE;
}
}
else
{
static_assert(HASH_MAP_ENTRY_STATE_FREE == 0, "memset(0) requires HASH_MAP_ENTRY_STATE_FREE == 0");
memset(m_entries, 0, allocationSize(m_capacity));
}
}
m_size = 0;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline const void* HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::data() const
{
return m_entries;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::empty() const
{
return m_size == 0;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::size() const
{
return m_size;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::capacity() const
{
return m_capacity;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::reserve(const size_t capacity)
{
const size_t adjustedCapacity = capacityAdjustedForLoadThreshold(capacity);
if (m_capacity < adjustedCapacity)
{
resize(adjustedCapacity);
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::find( const KeyT& key, ValueT** outValue )
{
EntryT* existing;
if (findExisting( key, &existing) )
{
*outValue = &existing->value;
return true;
}
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::find( const KeyT& key, const ValueT** outValue ) const
{
const EntryT* existing;
if (findExisting( key, &existing) )
{
*outValue = &existing->value;
return true;
}
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::exists( const KeyT& key ) const
{
const EntryT* existing;
if (findExisting( key, &existing) )
{
return true;
}
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocateEntry( KeyT&& key, ValueT** outValue )
{
EntryT* availableEntry;
resizeIfNecessary();
const bool available = findFirstAvailable(key, &availableEntry);
CARB_ASSERT(availableEntry);
if (available)
{
new (&availableEntry->key) KeyT(std::move(key));
CARB_ASSERT(availableEntry->state != HASH_MAP_ENTRY_STATE_OCCUPIED);
availableEntry->state = HASH_MAP_ENTRY_STATE_OCCUPIED;
*outValue = &availableEntry->value;
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size + 1 > m_size);
++m_size;
return true;
}
*outValue = &availableEntry->value;
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocateEntry( const KeyT& key, ValueT** outValue )
{
EntryT* availableEntry;
resizeIfNecessary();
const bool available = findFirstAvailable(key, &availableEntry);
CARB_ASSERT(availableEntry);
if (available)
{
new (&availableEntry->key) KeyT(key);
CARB_ASSERT(availableEntry->state != HASH_MAP_ENTRY_STATE_OCCUPIED);
availableEntry->state = HASH_MAP_ENTRY_STATE_OCCUPIED;
*outValue = &availableEntry->value;
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size + 1 > m_size);
++m_size;
return true;
}
*outValue = &availableEntry->value;
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntry( const KeyT& key )
{
EntryT* existing;
if (findExisting(key, &existing))
{
freeEntryInternal(existing);
return true;
}
else
{
return false;
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryByKeyAddress( const KeyT *const key )
{
static_assert(!std::is_polymorphic<EntryT>::value, "Unable to freeEntry by key address!");
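// Recover the enclosing EntryT from the address of its key member:
// entry == (EntryT*)((uintptr_t)key - offsetof(EntryT, key)).
// This is only valid for key addresses previously handed out by this map.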
constexpr size_t OFFSET = offsetof(EntryT, key);
EntryT *const entry = (EntryT*)(((uintptr_t)key) - OFFSET);
freeEntryInternal(entry);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryByValueAddress(const ValueT *const value)
{
static_assert(!std::is_polymorphic<EntryT>::value, "Unable to freeEntry by value address!");
constexpr size_t OFFSET = offsetof(EntryT, value);
EntryT *const entry = (EntryT*)(((uintptr_t)value) - OFFSET);
freeEntryInternal(entry);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::hashInternal( const KeyT& key ) const
{
size_t hash = m_hasher(key);
#define HASHMAP_DEFENSIVE_SALT IN_USE
#if USING( HASHMAP_DEFENSIVE_SALT )
// Apply a defensive salt to the user-calculated hash value. It is unsafe to assume user-provided hashes are good.
//
// Kit historically had a problem where std::hash<PathC> caused terrible distributions inside of space-restricted
// hashmaps. This was primarily because the hash values returned had zero entropy in the lower 8 bits. The higher
// bits had excellent entropy, though. It is trivial to improve std::hash<PathC> by doing (oldHashValue >> 8).
// In other words, tossing the bits with zero entropy. This will produce perfectly unique hash value output for
// every PathC input. However, using this directly in a hash map is still not ideal because, while the hash function
// has a guarantee on uniqueness, it does not necessarily lend to good distributions in a hash table. Two hash
// values that are multiples of each other will naturally collide in any space-restricted hashmap.
// (Which, realistically, is all real hash maps since hardware memory is not infinite.) Applying a little salt on
// top of the hash value fixes this distribution problem.
//
// This also provides general safety against poorly implemented user-provided hash functions that don't generate
// unique or well distributed values.
//
// Known problematic data sets:
// - PathC (interned SdfPaths)
// - TokenC (interned TfTokens)
//
// Salt techniques tried:
// - H3_XX64 (xxhash):
// - good distribution
// - too slow
// - H3_XX64 (xxhash) with custom seeds:
// - no seed performed better than the xxhash default secret
// - Custom H3_XX64 implementation specialized for aligned 64-bit keys:
// - mathematically identical distribution to H3_XX64
// - 2x faster performance than official implementation
// - Multiply by a prime
// - best distribution so far
// - best speed so far (3x faster than custom H3_XX64)
//
// TODO: A fun intern experiment would be to investigate our various omniverse hash functions for distribution and
// speed, and to investigate alternative defensive salting techniques.
return hash * 48271ull;
#else // #if USING( HASHMAP_DEFENSIVE_SALT )
return hash;
#endif // #if USING( HASHMAP_DEFENSIVE_SALT )
#undef HASHMAP_DEFENSIVE_SALT
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::resizeIfNecessary()
{
if (m_size >= m_loadThreshold)
{
CARB_ASSERT(!m_capacity || m_capacity * 2 > m_capacity);
resize(m_capacity ? m_capacity * 2 : MIN_INIT_CAPACITY);
}
else if (!m_entries)
{
const size_t bufSize = allocationSize(m_capacity);
m_entries = (EntryT*)m_alloc(bufSize);
memset(m_entries, 0, bufSize);
}
CARB_ASSERT(m_entries);
CARB_ASSERT(m_capacity);
CARB_ASSERT((m_capacity & (m_capacity - 1)) == 0);
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size < m_loadThreshold);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::resize(const size_t nextCapacity)
{
CARB_ASSERT(m_size < loadThreshold(nextCapacity));
CARB_ASSERT((nextCapacity & (nextCapacity - 1)) == 0);
HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT> tmp(nextCapacity, m_hasher, m_keyEquals, m_alloc, m_free );
size_t index = 0;
size_t visited = 0;
for ( ; index < m_capacity && visited < m_size; ++index)
{
EntryT *const entry = &m_entries[index];
if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
ValueT *tmpV;
tmp.allocateEntry(std::move(entry->key), &tmpV);
new (tmpV) ValueT(std::move(entry->value));
CARB_ASSERT(visited < m_size);
++visited;
}
}
CARB_ASSERT(m_size == tmp.m_size);
using std::swap;
swap(m_entries, tmp.m_entries);
swap(m_size, tmp.m_size);
swap(m_capacity, tmp.m_capacity);
swap(m_loadThreshold, tmp.m_loadThreshold);
swap(m_mask, tmp.m_mask);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryInternal( EntryT *const entry )
{
CARB_ASSERT(entry);
CARB_ASSERT(entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED);
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->key.~KeyT();
}
if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->value.~ValueT();
}
entry->state = HASH_MAP_ENTRY_STATE_DELETED;
CARB_ASSERT(m_size);
CARB_ASSERT(m_size - 1 < m_size);
--m_size;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
template<typename CallbackT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::forEach( CallbackT callback )
{
size_t index = 0;
size_t visited = 0;
const size_t size_captured = m_size;
for ( ; index < m_capacity && visited < size_captured; ++index)
{
if (m_entries[index].state == HASH_MAP_ENTRY_STATE_OCCUPIED)
{
callback(m_entries[index].key, m_entries[index].value);
CARB_ASSERT(visited < size_captured);
++visited;
}
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
template<typename CallbackT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::forEach(CallbackT callback) const
{
size_t index = 0;
size_t visited = 0;
const size_t size_captured = m_size;
for (; index < m_capacity && visited < size_captured; ++index)
{
if (m_entries[index].state == HASH_MAP_ENTRY_STATE_OCCUPIED)
{
callback(m_entries[index].key, m_entries[index].value);
CARB_ASSERT(visited < size_captured);
++visited;
}
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::totalCollisionLength() const
{
size_t len = 0;
if ( m_entries )
{
size_t index = 0;
size_t visited = 0;
const size_t size_captured = m_size;
for (; index < m_capacity && visited < size_captured; ++index)
{
const EntryT *const probe = &m_entries[index];
if (probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED)
{
const EntryT *const natural = &m_entries[hashInternal(probe->key) & m_mask];
len += (size_t)((natural <= probe) ? (probe - natural) : ( ( probe + m_capacity ) - natural) );
CARB_ASSERT(visited < size_captured);
++visited;
}
}
}
return len;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findFirstAvailable( const KeyT& key, EntryT** outEntry )
{
EntryT* probe;
size_t probeIdx;
// Note: this resizes opportunistically even when the key turns out to already exist, but that
// edge case occurs at most once per resize, and resizing up front avoids searching twice.
resizeIfNecessary();
#if USING( ASSERTS )
size_t probes = 0;
#endif // #if USING( ASSERTS )
probeIdx = hashInternal(key) & m_mask;
CARB_ASSERT(m_size < m_capacity); // otherwise we infinite loop
while(1)
{
CARB_ASSERT( probeIdx < m_capacity );
probe = &m_entries[probeIdx];
if ( probe->state == HASH_MAP_ENTRY_STATE_FREE )
{
*outEntry = probe;
return true;
}
else if ( probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if ( m_keyEquals(probe->key, key) )
{
*outEntry = probe;
return false;
}
}
else if ( probe->state == HASH_MAP_ENTRY_STATE_DELETED )
{
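// Tombstone left by a prior freeEntry(): the slot is reusable for insertion.
// Lookups (findExisting) instead probe past tombstones.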
*outEntry = probe;
return true;
}
probeIdx = ( probeIdx + 1 ) & m_mask;
#if USING( ASSERTS )
++probes;
CARB_ASSERT(probes < m_capacity);
#endif // #if USING( ASSERTS )
}
CARB_ASSERT(false && "unreachable code");
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findExisting( const KeyT& key, EntryT** outEntry )
{
if (!m_size)
{
return false;
}
EntryT* probe;
size_t probeIdx;
#if USING( ASSERTS )
size_t probes = 0;
#endif // #if USING( ASSERTS )
probeIdx = hashInternal(key) & m_mask;
CARB_ASSERT(m_size < m_capacity); // otherwise we infinite loop
while(1)
{
CARB_ASSERT( probeIdx < m_capacity );
probe = &m_entries[probeIdx];
if ( probe->state == HASH_MAP_ENTRY_STATE_FREE )
{
return false;
}
else if ( probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if ( m_keyEquals(probe->key, key) )
{
*outEntry = probe;
return true;
}
}
else
{
// skip
CARB_ASSERT( probe->state == HASH_MAP_ENTRY_STATE_DELETED );
}
probeIdx = ( probeIdx + 1 ) & m_mask;
#if USING( ASSERTS )
++probes;
CARB_ASSERT(probes < m_capacity);
#endif // #if USING( ASSERTS )
}
CARB_ASSERT(false && "unreachable code");
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findExisting( const KeyT& key, const EntryT** outEntry ) const
{
return const_cast<HashMap*>(this)->findExisting( key, const_cast< EntryT** >(outEntry) );
}
} // namespace flatcache
} // namespace carb
| 31,028 |
C
| 35.634002 | 162 | 0.655182 |
omniverse-code/kit/fabric/include/carb/flatcache/Enums.h
|
// Copyright (c) 2021-2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
namespace carb {
namespace flatcache {
/**
* @enum PtrToPtrKind
*
* @details When getting an array-valued attribute for GPU access, you can
* optionally use this enum to ask for a GPU pointer to the GPU data
* pointer (eGpuPtrToGpuPtr), or a CPU pointer to the GPU data
* pointer (eCpuPtrToGpuPtr).
* The advantage of using eCpuPtrToGpuPtr is that you can dereference
* the returned pointer on the CPU, and pass the GPU data pointer as
* a CUDA kernel parameter.
* The advantage of using eGpuPtrToGpuPtr is that it makes it easier
* to extend kernels to operate on arrays of arrays later. Also it
* allows us to support allocation and resizing of array-valued
* attributes on the GPU in the future.
*
* PtrToPtrKind is not a parameter of methods returning arrays of
* arrays, for example getArrayGPU(). This is because there is no way
* to pass a variable length array of GPU pointers to a kernel using
* its CPU launch parameters. So GPU arrays of arrays always have to
* be passed to kernels as a GPU pointer to an array of GPU pointers.
*/
enum class PtrToPtrKind
{
eNotApplicable = 0,
eGpuPtrToGpuPtr = 0, // eGpuPtrToGpuPtr == eNotApplicable for backward compatibility
eCpuPtrToGpuPtr = 1
};
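// Illustrative sketch (hypothetical accessor and kernel-launch names; not part
// of this header): with eCpuPtrToGpuPtr the returned pointer lives in CPU
// memory, so it can be dereferenced on the host and the GPU data pointer passed
// as an ordinary kernel parameter:
//
//   float** cpuPtrToGpuData = getAttributeGpu(PtrToPtrKind::eCpuPtrToGpuPtr); // hypothetical getter
//   launchKernel(*cpuPtrToGpuData, elemCount); // hypothetical launch
//
// With eGpuPtrToGpuPtr the pointer itself resides in GPU memory; it cannot be
// dereferenced on the host and must be consumed by device code.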
} // namespace flatcache
} // namespace carb
| 1,865 |
C
| 41.40909 | 88 | 0.70563 |
omniverse-code/kit/fabric/include/carb/flatcache/IdTypes.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <cstdint>
#include <stddef.h>
namespace carb
{
namespace flatcache
{
struct StageInProgressId
{
uint64_t id;
};
struct StageAtTimeIntervalId
{
uint64_t id;
};
struct StageWithHistoryId
{
uint64_t id;
};
struct PrimBucketListId
{
uint64_t id;
};
struct ListenerId
{
size_t id;
bool operator==(ListenerId other) const
{
return id == other.id;
}
};
struct ListenerIdHasher
{
size_t operator()(const ListenerId& key) const
{
return key.id;
}
};
enum class PlatformId : uint8_t
{
Global = 0,
// add additional platforms here
Count,
};
}
}
| 1,081 |
C
| 15.149254 | 77 | 0.696577 |
omniverse-code/kit/fabric/include/carb/flatcache/OGUtilsNotForGeneralUser.h
|
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Interface.h>
namespace carb
{
namespace flatcache
{
struct OGUtilsNotForGeneralUser
{
CARB_PLUGIN_INTERFACE("carb::flatcache::OGUtilsNotForGeneralUser", 0, 2);
//////////////////////////////////////////////////////////////////////////
// A simple collection of utility functions for interacting with flatcache
//////////////////////////////////////////////////////////////////////////
/** @brief Import attributes from a USD prim into the provided cache, at a given path,
* read at a given time
*
* @cache[in/out] The cache to be populated
* @dstPath[in] The path location in the cache to import attributes to
* @prim[in] The prim from which to read the attributes in USD
* @time[in] The time at which to read the attributes
* @filter[in] A subset of attributes to consider during this import process.
* Will import all attributes if left empty
* @force[in] Whether to overwrite values, or just add missing ones
*/
void(CARB_ABI* importPrimAttributesToCacheAtPathAtTime)(struct PathToAttributesMap& cache,
const pxr::SdfPath& dstPath,
const pxr::UsdPrim& prim,
const pxr::UsdTimeCode& time,
const std::set<TokenC>& filter,
bool force);
/** @brief Copy a subset of data from the cache back to USD
*
* @cache[in/out] The cache to read the data from
* @bucket[in] The bucket subset from which the data needs to be copied back to USD
* @skipMeshPoints[in] Whether to skip copying mesh point data back to USD
*/
void (CARB_ABI* pushDataToUSD)(struct PathToAttributesMap& cache, struct BucketSubset const& bucket, bool skipMeshPoints);
// Prefetch the whole USD stage to the cache
// Typically you only call this at stage load time, because the USD notify
// handler updates the cache if the stage changes.
void(CARB_ABI* usdToCache)(PathToAttributesMap& cache, bool processConnections);
};
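// Illustrative usage (sketch; `utils` is assumed to be an acquired
// OGUtilsNotForGeneralUser* and `cache` an existing PathToAttributesMap):
//
//   utils->usdToCache(cache, /*processConnections=*/true); // prefetch the whole stage once at load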
}
}
| 2,644 |
C
| 41.66129 | 126 | 0.59947 |
omniverse-code/kit/fabric/include/omni/gpucompute/D3dContext.h
|
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/graphics/Graphics.h>
using namespace carb::graphics;
namespace omni
{
namespace gpucompute
{
// TODO: move out of public API
struct ContextD3D
{
std::unique_ptr<Device, GfxResult (*)(Device*)> device;
CommandQueue* commandQueue; // Not unique_ptr because we don't own it
std::unique_ptr<CommandAllocator, void (*)(CommandAllocator*)> commandAllocator;
std::unique_ptr<CommandList, void (*)(CommandList*)> commandList;
std::unique_ptr<Fence, void (*)(Fence*)> fence;
ContextD3D(DeviceDesc deviceDesc, carb::graphics::Graphics* graphics)
: device(graphics->createDevice(deviceDesc), graphics->destroyDevice),
commandQueue(graphics->getCommandQueue(device.get(), CommandQueueType::eRender, 0)),
commandAllocator(graphics->createCommandAllocator(commandQueue), graphics->destroyCommandAllocator),
commandList(graphics->createCommandList(commandAllocator.get()), graphics->destroyCommandList),
fence(graphics->createFence(device.get(), carb::graphics::FenceDesc { kFenceCreateFlagNone, "GPU compute context fence" }), graphics->destroyFence)
{
}
};
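// Illustrative usage (sketch; `graphics` is assumed to be an already-acquired
// carb::graphics::Graphics* and is not provided by this header):
//
//   DeviceDesc deviceDesc{};
//   ContextD3D context(deviceDesc, graphics);
//   // device, commandAllocator, commandList and fence are released in reverse
//   // declaration order through their interface-provided deleters; the
//   // borrowed commandQueue is intentionally left alone.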
}
}
| 1,595 |
C
| 39.923076 | 157 | 0.743574 |
omniverse-code/kit/fabric/include/omni/gpucompute/GpuCompute.h
|
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Types.h>
#include <map>
#include <memory>
#include <vector>
using namespace carb;
namespace carb
{
namespace graphics
{
struct Context;
struct Graphics;
struct Device;
struct CommandList;
struct CommandQueue;
struct Fence;
struct Shader;
}
}
namespace omni
{
namespace gpucompute
{
enum class MemcpyKind
{
hostToHost = 0,
hostToDevice = 1,
deviceToHost = 2,
deviceToDevice = 3
};
struct Context;
struct Shader;
struct Parameter
{
const char* name;
bool isConstantBuffer;
};
// API-agnostic representation of
// static_cast<char*>(buffer) + byteOffset
// which points to data of size elemSize
struct GpuPointer
{
void* buffer;
size_t byteOffset;
size_t elemSize;
};
struct CpuBuffer
{
void* data;
size_t count;
};
struct Args
{
// TODO: move CPU backend into its own plugin
// GPU
std::vector<GpuPointer> gpuBuffers;
std::vector<size_t> gpuArgToBufferCount;
// CPU
std::vector<CpuBuffer> cpuArgs;
};
// A CUDA-style interface for D3D, consisting of GPU malloc, free, memcpy and
// kernel dispatch
struct GpuCompute
{
CARB_PLUGIN_INTERFACE("omni::gpucompute::GpuCompute", 0, 1)
// CPU memory allocation
void(CARB_ABI* hostAlloc)(Context& context, void** ptr, size_t byteCount);
void(CARB_ABI* freeHost)(Context& context, void* ptr);
// GPU memory allocation
void*(CARB_ABI* malloc)(Context& context, size_t byteCount, size_t elemSize);
void(CARB_ABI* free)(Context& context, void* ptr);
// GPU async memory allocation (uses stream 0 for CUDA)
void*(CARB_ABI* mallocAsync)(Context& context, size_t byteCount, size_t elemSize);
void(CARB_ABI* freeAsync)(Context& context, void* ptr);
void(CARB_ABI* memcpy)(Context& context, void* dst, const void* src, size_t byteCount, MemcpyKind kind);
void(CARB_ABI* dispatch)(Context& context, Shader& shader, Args& args, carb::Uint3 gridDim);
Context&(CARB_ABI* createContext)();
Context&(CARB_ABI* createContextD3dVk)(carb::graphics::Graphics* graphics,
carb::graphics::Device* device,
carb::graphics::CommandList* commandList,
carb::graphics::CommandQueue* commandQueue,
carb::graphics::Fence* fence);
void(CARB_ABI* destroyContext)(Context& context);
uint32_t(CARB_ABI* peekAtLastError)(Context& context);
uint32_t(CARB_ABI* getLastError)(Context& context);
void(CARB_ABI* memcpyAsync)(Context& context, void* dst, const void* src, size_t byteCount, MemcpyKind kind);
};
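// Illustrative usage sketch (`compute` is assumed to be an acquired GpuCompute*
// and `hostData` a host buffer of `n` floats; neither is defined by this header):
//
//   Context& ctx = compute->createContext();
//   void* d = compute->malloc(ctx, n * sizeof(float), sizeof(float));
//   compute->memcpy(ctx, d, hostData, n * sizeof(float), MemcpyKind::hostToDevice);
//   // ... dispatch a shader here via compute->dispatch(ctx, shader, args, gridDim) ...
//   compute->memcpy(ctx, hostData, d, n * sizeof(float), MemcpyKind::deviceToHost);
//   compute->free(ctx, d);
//   compute->destroyContext(ctx);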
enum class Target
{
CPU,
GPU
};
struct ComputeCompiler
{
CARB_PLUGIN_INTERFACE("omni::gpucompute::ComputeCompiler", 0, 1)
omni::gpucompute::Shader*(CARB_ABI* compile)(carb::graphics::Device* device, Target target, const char* codeString);
void(CARB_ABI* getParameters)(Parameter*& parameters, size_t& count, Shader& shader);
void(CARB_ABI* destroyShader)(Shader& shader);
};
}
}
| 3,512 |
C
| 25.022222 | 120 | 0.679385 |
omniverse-code/kit/include/carb/IObject.h
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Implementation of Carbonite objects.
#pragma once
#include "Interface.h"
#include <cstdint>
namespace carb
{
/**
* Reference-counted object base.
*/
class IObject
{
public:
CARB_PLUGIN_INTERFACE("carb::IObject", 1, 0)
/**
* Destructor.
*/
virtual ~IObject() = default;
/**
* Atomically add one to the reference count.
* @returns The current reference count after one was added, though this value may change before read if other
* threads are also modifying the reference count. The return value is guaranteed to be non-zero.
*/
virtual size_t addRef() = 0;
/**
* Atomically subtracts one from the reference count. If the result is zero, carb::deleteHandler() is called for
* `this`.
* @returns The current reference count after one was subtracted. If zero is returned, carb::deleteHandler() was
* called for `this`.
*/
virtual size_t release() = 0;
};
/**
* Smart pointer type for ref counting `IObject`. It automatically controls reference count for the underlying
* `IObject` pointer.
*/
template <class T>
class ObjectPtr
{
public:
//////////// Ctors/dtor ////////////
/**
* Policy directing how the smart pointer is initialized from from raw pointer.
*/
enum class InitPolicy
{
eBorrow, ///< Increases reference count.
eSteal ///< Assign the pointer without increasing the reference count.
};
/**
* Default Constructor
*/
ObjectPtr() : m_object(nullptr)
{
}
/**
* Nullptr Constructor
*/
ObjectPtr(std::nullptr_t) : m_object(nullptr)
{
}
/**
* Constructor
* @param object The raw pointer to an object. If not `nullptr`, it will be "borrowed"; that is, the reference count
* will be increased as long as `*this` contains it.
*/
explicit ObjectPtr(T* object) : m_object(object)
{
if (m_object)
{
m_object->addRef();
}
}
/**
* Constructor.
* @param object The raw pointer to an object.
* @param policy Directive on whether the reference count should be increased or not.
*/
ObjectPtr(T* object, InitPolicy policy) : m_object(object)
{
if (policy == InitPolicy::eBorrow && m_object != nullptr)
{
m_object->addRef();
}
}
/**
* Copy constructor. Always increases the reference count.
* @param other The smart pointer from which to copy a reference.
*/
ObjectPtr(const ObjectPtr<T>& other) : ObjectPtr(other.m_object, InitPolicy::eBorrow)
{
}
/// @copydoc ObjectPtr(const ObjectPtr<T>& other)
template <class U>
ObjectPtr(const ObjectPtr<U>& other) : ObjectPtr(other.m_object, InitPolicy::eBorrow)
{
}
/**
* Move constructor. Steals the reference count from @p other and leaves it empty.
* @param other The smart pointer from which to steal a reference.
*/
ObjectPtr(ObjectPtr<T>&& other) : m_object(other.m_object)
{
other.m_object = nullptr;
}
/// @copydoc ObjectPtr(ObjectPtr<T>&& other)
template <class U>
ObjectPtr(ObjectPtr<U>&& other) : m_object(other.m_object)
{
other.m_object = nullptr;
}
/**
* Destructor.
*/
~ObjectPtr()
{
_release();
}
//////////// Helpers ////////////
//////////// Ptr ////////////
/**
* Converts the smart pointer to a raw pointer.
* @returns The raw pointer referenced by the smart pointer. May be `nullptr`.
*/
T* get() const
{
return m_object;
}
/**
* Pointer dereference operator.
* @returns The raw pointer referenced by the smart pointer.
*/
T* operator->() const
{
CARB_ASSERT(m_object);
return m_object;
}
/**
* Dereference operator.
* @returns A reference to the pointed-at object.
*/
T& operator*() const
{
CARB_ASSERT(m_object);
return *m_object;
}
/**
* Boolean conversion operator.
* @returns `true` if the smart pointer is not empty; `false` if the smart pointer is empty.
*/
explicit operator bool() const
{
return get() != nullptr;
}
//////////// Explicit access ////////////
/**
* Returns the address of the internal reference.
* @returns The address of the internal reference.
*/
T* const* getAddressOf() const
{
return &m_object;
}
/// @copydoc getAddressOf() const
T** getAddressOf()
{
return &m_object;
}
/**
* Helper function to release any current reference and return the address of the internal reference pointer.
* @returns The address of the internal reference.
*/
T** releaseAndGetAddressOf()
{
_release();
return &m_object;
}
/**
* Resets this smart pointer to `nullptr` and returns the previously reference object @a without releasing the held
* reference.
* @returns The previously referenced object.
*/
T* detach()
{
T* temp = m_object;
m_object = nullptr;
return temp;
}
/**
* Releases the reference on any held object and instead @a steals the given object.
* @param other The object to steal a reference to.
*/
void attach(T* other)
{
_release();
m_object = other;
}
//////////// Assignment operator ////////////
/**
* Assignment to @a nullptr. Releases any previously held reference.
* @returns @a *this
*/
ObjectPtr& operator=(decltype(nullptr))
{
_release();
return *this;
}
/**
* Releases any previously held reference and copies a reference to @p other.
* @param other The object to reference.
* @returns @a *this
*/
ObjectPtr& operator=(T* other)
{
ObjectPtr(other).swap(*this);
return *this;
}
/// @copydoc operator=
template <typename U>
ObjectPtr& operator=(U* other)
{
ObjectPtr(other).swap(*this);
return *this;
}
/// @copydoc operator=
ObjectPtr& operator=(const ObjectPtr& other)
{
ObjectPtr(other).swap(*this);
return *this;
}
/// @copydoc operator=
template <class U>
ObjectPtr& operator=(const ObjectPtr<U>& other)
{
ObjectPtr(other).swap(*this);
return *this;
}
/**
* Releases any previously held reference and steals the reference from @p other.
* @param other The reference to steal. Will be swapped with @a *this.
* @returns @a *this
*/
ObjectPtr& operator=(ObjectPtr&& other)
{
other.swap(*this);
return *this;
}
/// @copydoc operator=(ObjectPtr&& other)
template <class U>
ObjectPtr& operator=(ObjectPtr<U>&& other)
{
ObjectPtr(std::move(other)).swap(*this);
return *this;
}
/**
* Compares equality of this object and another one of the same type.
*
* @param[in] other The other object to compare this one to.
* @returns `true` if the two objects identify the same underlying object. Returns
* `false` otherwise.
*/
template <class U>
bool operator==(const ObjectPtr<U>& other) const
{
return get() == other.get();
}
/**
* Compares inequality of this object and another one of the same type.
*
* @param[in] other The other object to compare this one to.
* @returns `true` if the two objects do not identify the same underlying object. Returns
* `false` otherwise.
*/
template <class U>
bool operator!=(const ObjectPtr<U>& other) const
{
return get() != other.get();
}
/**
* Swaps with another smart pointer.
* @param other The smart pointer to swap with.
*/
void swap(ObjectPtr& other)
{
std::swap(m_object, other.m_object);
}
private:
void _release()
{
if (T* old = std::exchange(m_object, nullptr))
{
old->release();
}
}
T* m_object;
};
/**
* Helper function to create carb::ObjectPtr from a carb::IObject pointer by "stealing" the pointer; that is, without
* increasing the reference count.
* @param other The raw pointer to steal.
* @returns A smart pointer referencing @p other.
*/
template <class T>
inline ObjectPtr<T> stealObject(T* other)
{
return ObjectPtr<T>(other, ObjectPtr<T>::InitPolicy::eSteal);
}
/**
* Helper function to create carb::ObjectPtr from a carb::IObject pointer by "borrowing" the pointer; that is, by
* increasing the reference count.
* @param other The raw pointer to reference.
* @returns A smart pointer referencing @p other.
*/
template <class T>
inline ObjectPtr<T> borrowObject(T* other)
{
return ObjectPtr<T>(other, ObjectPtr<T>::InitPolicy::eBorrow);
}
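// Illustrative usage (sketch; createObject() is a hypothetical factory that
// returns an already-addRef'd IObject*):
//
//   carb::ObjectPtr<carb::IObject> a = carb::stealObject(createObject());
//   // `a` now owns the factory's reference; no extra addRef() occurred.
//
//   carb::ObjectPtr<carb::IObject> b = carb::borrowObject(a.get());
//   // `b` took its own reference via addRef(); both release on destruction.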
} // namespace carb
| 9,445 |
C
| 23.663185 | 120 | 0.596506 |
omniverse-code/kit/include/carb/SdkVersion.h
|
// Copyright (c) 2023-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// NOTE: This file is generated by 'make_version.lua' and should not be modified directly.
//
//! @file
//! @brief Defines a macro containing the SDK version this header was built with.
#pragma once
//! Version string for this SDK build. This string is also returned by carbGetSdkVersion().
//! This value can be passed to @ref CARB_IS_SAME_SDK_VERSION() to verify that the loaded
//! version of the Carbonite framework library matches the headers that are in use.
#define CARB_SDK_VERSION "158.5+release158.tc9626.54324001"
| 966 |
C
| 49.894734 | 92 | 0.773292 |
omniverse-code/kit/include/carb/PluginInitializers.h
|
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Utilities to ease the creation of Carbonite plugins.
#pragma once
#include "Defines.h"
namespace carb
{
#ifndef DOXYGEN_BUILD
namespace detail
{
inline bool& initialized() noexcept
{
static bool init = false;
return init;
}
} // namespace detail
#endif
struct Framework;
namespace logging
{
void registerLoggingForClient() noexcept;
void deregisterLoggingForClient() noexcept;
} // namespace logging
namespace profiler
{
void registerProfilerForClient() noexcept;
void deregisterProfilerForClient() noexcept;
} // namespace profiler
namespace assert
{
void registerAssertForClient() noexcept;
void deregisterAssertForClient() noexcept;
} // namespace assert
namespace l10n
{
void registerLocalizationForClient() noexcept;
void deregisterLocalizationForClient() noexcept;
} // namespace l10n
/**
* Function called automatically at plugin startup to initialize utilities within each plugin.
*/
inline void pluginInitialize()
{
if (detail::initialized())
return;
carb::detail::initialized() = true;
logging::registerLoggingForClient();
profiler::registerProfilerForClient();
assert::registerAssertForClient();
l10n::registerLocalizationForClient();
}
/**
* Function called automatically at plugin shutdown to de-initialize utilities within each plugin.
*/
inline void pluginDeinitialize()
{
if (!detail::initialized())
return;
carb::detail::initialized() = false;
assert::deregisterAssertForClient();
profiler::deregisterProfilerForClient();
logging::deregisterLoggingForClient();
l10n::deregisterLocalizationForClient();
}
} // namespace carb
| 2,084 |
C
| 24.426829 | 98 | 0.758157 |
omniverse-code/kit/include/carb/Defines.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Carbonite basic defines and helper functions.
#pragma once
#include <cassert>
#include <cinttypes>
#include <climits>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <csignal>
#ifndef CARB_NO_MALLOC_FREE
# include <cstring>
#else
# include <cstddef> // for size_t
#endif
#include <new>
#include <exception> // for std::terminate
#include <type_traits>
#include <mutex>
/** A macro to put into `#else` branches when writing platform-specific code. */
#define CARB_UNSUPPORTED_PLATFORM() static_assert(false, "Unsupported platform!")
/** A macro to put into the `#else` branches when writing CPU architecture specific code. */
#define CARB_UNSUPPORTED_ARCHITECTURE() static_assert(false, "Unsupported architecture!")
#ifndef CARB_DEBUG
# if defined(NDEBUG) || defined(DOXYGEN_BUILD)
//! A macro indicating whether the current compilation unit is built in debug mode. Always defined as either 0 or 1. Can
//! be overridden by defining before this file is included or by passing on the compiler command line. Defined as `0`
//! if `NDEBUG` is defined; `1` otherwise.
# define CARB_DEBUG 0
# else
# define CARB_DEBUG 1
# endif
#endif
//! A macro that can be used to indicate classes and members that participate in visualizers, such as \a carb.natvis.
//! This is a reminder that these classes, members and types will require visualizer fixup if changes are made.
#define CARB_VIZ
#ifdef DOXYGEN_BUILD
//! A macro defined as `1` if compilation is targeting Windows; `0` otherwise. Exactly one of the `CARB_PLATFORM_*`
//! macros will be set to `1`. May be overridden by defining before this file is included or by passing on the compiler
//! command line. By default, set to `1` if `_WIN32` is defined.
# define CARB_PLATFORM_WINDOWS 0
//! A macro defined as `1` if compilation is targeting Linux; `0` otherwise. Exactly one of the `CARB_PLATFORM_*`
//! macros will be set to `1`. May be overridden by defining before this file is included or by passing on the compiler
//! command line. By default, set to `1` if `_WIN32` is not defined and `__linux__` is defined.
# define CARB_PLATFORM_LINUX 1
//! A macro defined as `1` if compilation is targeting Mac OS; `0` otherwise. Exactly one of the `CARB_PLATFORM_*`
//! macros will be set to `1`. May be overridden by defining before this file is included or by passing on the compiler
//! command line. By default, set to `1` if `_WIN32` and `__linux__` are not defined and `__APPLE__` is defined.
# define CARB_PLATFORM_MACOS 0
//! The name of the current platform as a string.
# define CARB_PLATFORM_NAME
#elif defined(CARB_PLATFORM_WINDOWS) && defined(CARB_PLATFORM_LINUX) && defined(CARB_PLATFORM_MACOS)
# if (!!CARB_PLATFORM_WINDOWS) + (!!CARB_PLATFORM_LINUX) + (!!CARB_PLATFORM_MACOS) != 1
# define CARB_PLATFORM_WINDOWS // show previous definition
# define CARB_PLATFORM_LINUX // show previous definition
# define CARB_PLATFORM_MACOS // show previous definition
# error Exactly one of CARB_PLATFORM_WINDOWS, CARB_PLATFORM_LINUX or CARB_PLATFORM_MACOS must be non-zero.
# endif
#elif !defined(CARB_PLATFORM_WINDOWS) && !defined(CARB_PLATFORM_LINUX)
# ifdef _WIN32
# define CARB_PLATFORM_WINDOWS 1
# define CARB_PLATFORM_LINUX 0
# define CARB_PLATFORM_MACOS 0
# define CARB_PLATFORM_NAME "windows"
# elif defined(__linux__)
# define CARB_PLATFORM_WINDOWS 0
# define CARB_PLATFORM_LINUX 1
# define CARB_PLATFORM_MACOS 0
# define CARB_PLATFORM_NAME "linux"
# elif defined(__APPLE__)
# define CARB_PLATFORM_WINDOWS 0
# define CARB_PLATFORM_LINUX 0
# define CARB_PLATFORM_MACOS 1
# define CARB_PLATFORM_NAME "macos"
# else
CARB_UNSUPPORTED_PLATFORM();
# endif
#else
# error "Must define all of CARB_PLATFORM_WINDOWS, CARB_PLATFORM_LINUX and CARB_PLATFORM_MACOS or none."
#endif
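// Usage sketch: exactly one CARB_PLATFORM_* macro is non-zero, so platform-specific
// code can branch on them and fail loudly on anything unexpected:
//
//     #if CARB_PLATFORM_WINDOWS
//     // Windows-only code
//     #elif CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS
//     // POSIX code
//     #else
//     CARB_UNSUPPORTED_PLATFORM();
//     #endif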
#if CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS || defined(DOXYGEN_BUILD)
# include <unistd.h> // _POSIX_VERSION comes from unistd.h
/** This is set to `_POSIX_VERSION` on platforms that are mostly-compliant with POSIX.
* This is set to 0 on other platforms (e.g. no GNU extensions).
*/
# define CARB_POSIX _POSIX_VERSION
#else
# define CARB_POSIX 0
#endif
#ifndef DOXYGEN_SHOULD_SKIP_THIS
# if CARB_PLATFORM_WINDOWS
# ifndef CARB_NO_MALLOC_FREE
# include "malloc.h"
# endif
# include <intrin.h>
# elif CARB_PLATFORM_LINUX
# include <alloca.h>
# include <signal.h>
# define _alloca alloca
# endif
#endif
// Architecture defines
#ifdef DOXYGEN_BUILD
//! A macro defined as `1` if compilation is targeting the AArch64 platform; `0` otherwise. May not be overridden on the
//! command line or by defining before including this file. Set to `1` if `__aarch64__` is defined, `0` if `__x86_64__`
//! or `_M_X64` are defined, and left undefined otherwise.
# define CARB_AARCH64 0
//! A macro defined as `1` if compilation is targeting the x86-64 platform; `0` otherwise. May not be overridden on the
//! command line or by defining before including this file. Set to `0` if `__aarch64__` is defined, `1` if `__x86_64__`
//! or `_M_X64` are defined, and left undefined otherwise.
# define CARB_X86_64 1
//! The name of the current architecture as a string.
# define CARB_ARCH_NAME
#elif defined(__aarch64__)
# define CARB_AARCH64 1
# define CARB_X86_64 0
#elif defined(__x86_64__) /*GCC*/ || defined(_M_X64) /*MSVC*/
# define CARB_X86_64 1
# define CARB_AARCH64 0
#endif
#if CARB_PLATFORM_MACOS
# define CARB_ARCH_NAME "universal"
#else
# if CARB_X86_64
# define CARB_ARCH_NAME "x86_64"
# elif CARB_AARCH64
# define CARB_ARCH_NAME "aarch64"
# endif
#endif
#ifndef CARB_PROFILING
//! When set to a non-zero value, profiling macros in \a include/carb/profiler/Profile.h will report to the profiler;
//! otherwise the profiling macros have no effect. Always set to `1` by default, but may be overridden by defining a
//! different value before including this file or by specifying a different value on the compiler command line.
# define CARB_PROFILING 1
#endif
#ifdef DOXYGEN_BUILD
//! A macro defined as `1` if compilation is targeting the Tegra platform. By default set to `1` only if `__aarch64__`
//! and `__LINARO_RELEASE__` are defined; `0` otherwise. May be overridden by defining a different value before
//! including this file or by specifying a different value on the compiler command line.
# define CARB_TEGRA 0
#elif !defined(CARB_TEGRA)
# if defined(__aarch64__) && defined(__LINARO_RELEASE__)
# define CARB_TEGRA 1
# else
# define CARB_TEGRA 0
# endif
#endif
#ifdef DOXYGEN_BUILD
//! A macro defined as `1` if compilation is using Microsoft Visual C++, that is, if `_MSC_VER` is defined. May be
//! overridden by defining a different value before including this file or by specifying a different value on the
//! compiler command line, however, only one of `CARB_COMPILER_MSC` and `CARB_COMPILER_GNUC` must be set to `1`; the
//! other macro(s) must be set to `0`.
# define CARB_COMPILER_MSC 0
//! A macro defined as `1` if compilation is using GNU C Compiler (GCC), that is, if `_MSC_VER` is not defined but
//! `__GNUC__` is defined. May be overridden by defining a different value before including this file or by specifying a
//! different value on the compiler command line, however, only one of `CARB_COMPILER_MSC` and `CARB_COMPILER_GNUC` must
//! be set to `1`; the other macro(s) must be set to `0`.
# define CARB_COMPILER_GNUC 1
#elif defined(CARB_COMPILER_MSC) && defined(CARB_COMPILER_GNUC)
# if (!!CARB_COMPILER_MSC) + (!!CARB_COMPILER_GNUC) != 1
# define CARB_COMPILER_MSC // Show previous definition
# define CARB_COMPILER_GNUC // Show previous definition
# error Exactly one of CARB_COMPILER_MSC or CARB_COMPILER_GNUC must be non-zero.
# endif
#elif !defined(CARB_COMPILER_MSC) && !defined(CARB_COMPILER_GNUC)
# ifndef CARB_COMPILER_MSC
# if defined(_MSC_VER)
# define CARB_COMPILER_MSC 1
# define CARB_COMPILER_GNUC 0
# elif defined(__GNUC__)
# define CARB_COMPILER_MSC 0
# define CARB_COMPILER_GNUC 1
# else
# error "Unsupported compiler."
# endif
# endif
#else
# error "Must define CARB_COMPILER_MSC and CARB_COMPILER_GNUC or neither."
#endif
#ifdef DOXYGEN_BUILD
//! A macro defined as `1` if a Clang-infrastructure toolchain is building the current file, that is, if `__clang__` is
//! defined; `0` if not. May be overridden by defining a different value before including this file or by specifying a
//! different value on the compiler command line.
//! @note It is legal to have \ref CARB_COMPILER_MSC and \ref CARB_TOOLCHAIN_CLANG both as `1` simultaneously, which
//! represents a Clang-infrastructure toolchain running in Microsoft compatibility mode.
# define CARB_TOOLCHAIN_CLANG 0
#elif !defined(CARB_TOOLCHAIN_CLANG)
# if defined(__clang__)
# define CARB_TOOLCHAIN_CLANG 1
# else
# define CARB_TOOLCHAIN_CLANG 0
# endif
#endif
#ifdef DOXYGEN_BUILD
//! A macro defined as `1` if the toolchain is building the current file with `-fsanitize=address`, that is, if
//! `__SANITIZE_ADDRESS__` is defined; `0` otherwise. May be overridden by defining a different value before including
//! this file or by specifying a different value on the compiler command line. Microsoft Visual Studio supports address
//! sanitizer starting with 2019 (v16.9) by specifying `/fsanitize=address` on the compiler command line.
//! See https://learn.microsoft.com/en-us/cpp/sanitizers/asan?view=msvc-160
# define CARB_ASAN_ENABLED 0
#elif !defined(CARB_ASAN_ENABLED)
# ifdef __SANITIZE_ADDRESS__
# define CARB_ASAN_ENABLED __SANITIZE_ADDRESS__
# else
# define CARB_ASAN_ENABLED 0
# endif
#endif
//! De-parenthesize the contents of \c pack_. `CARB_DEPAREN((x, y))` becomes `x, y`. An unparenthesized pack will cause
//! cause a compilation failure; e.g.: `CARB_DEPAREN(foo)` will not work, but `CARB_DEPAREN((foo))` will.
#define CARB_DEPAREN(pack_) CARB_IDENTITY pack_
//! Return exactly the arguments. This is useful for expansion of the tokens.
#define CARB_IDENTITY(...) __VA_ARGS__
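// Usage sketch: parenthesizing a macro argument protects embedded commas from the
// preprocessor, and CARB_DEPAREN strips the protective parentheses afterwards
// (DECLARE_MEMBER is a hypothetical macro):
//
//     #define DECLARE_MEMBER(type_, name_) CARB_DEPAREN(type_) name_
//     DECLARE_MEMBER((std::map<int, float>), lookup); // -> std::map<int, float> lookup;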
// Compiler specific defines. Exist for all supported compilers but may be a no-op for certain compilers.
#ifdef DOXYGEN_BUILD
//! Acts as a `char[]` with the current full function signature.
# define CARB_PRETTY_FUNCTION "<function signature here>"
//! GCC only, defined as `__attribute__((__VA_ARGS__))`; ignored on non-GCC compilers.
# define CARB_ATTRIBUTE(...)
//! MSVC only, defined as `__declspec(__VA_ARGS__)`; ignored on non-MSVC compilers.
# define CARB_DECLSPEC(...)
//! MSVC only, defined as `__VA_ARGS__`; ignored on non-MSVC compilers.
# define CARB_MSC_ONLY(...)
//! Only non-MSVC compilers, defined as `__VA_ARGS__`; ignored on MSVC.
# define CARB_NOT_MSC(...)
//! GCC only, defined as `__VA_ARGS__`; ignored on non-GCC compilers.
# define CARB_GNUC_ONLY(...)
//! Only non-GCC compilers, defined as `__VA_ARGS__`; ignored on GCC.
# define CARB_NOT_GNUC(...)
//! Generic pragma, only to be used for pragmas that are the same on all supported compilers.
//! @see CARB_PRAGMA_MSC
//! @see CARB_PRAGMA_GNUC
# define CARB_PRAGMA(...)
//! MSVC only, defined as `__pragma(__VA_ARGS__)`; ignored on non-MSVC compilers.
# define CARB_PRAGMA_MSC(...)
//! GCC only, defined as `_Pragma(__VA_ARGS__)`; ignored on non-GCC compilers.
# define CARB_PRAGMA_GNUC(...)
//! Macro to work around Exhale tripping over `constexpr` sometimes and reporting things like:
//! `Invalid C++ declaration: Expected identifier in nested name, got keyword: static`
# define CARB_DOC_CONSTEXPR const
//! Indicates whether exceptions are enabled for the current compilation unit. Value depends on parameters passed to the
//! compiler.
# define CARB_EXCEPTIONS_ENABLED 1
//! Conditionally includes text only when documenting (i.e. when `DOXYGEN_BUILD` is defined).
//! @param ... The text to include if documenting
# define CARB_DOC_ONLY(...) __VA_ARGS__
//! Declares a value or statement in a way that prevents Doxygen and Sphinx from getting confused
//! about matching symbols. There seems to be a bug in Sphinx that prevents at least templated
//! symbols from being matched to the ones generated by Doxygen when keywords such as `decltype`
//! are used. This is effectively the opposite operation as CARB_DOC_ONLY().
# define CARB_NO_DOC(...)
#else
# define CARB_DOC_CONSTEXPR constexpr
# define CARB_DOC_ONLY(...)
# define CARB_NO_DOC(...) __VA_ARGS__
# if CARB_COMPILER_MSC
# define CARB_PRETTY_FUNCTION __FUNCSIG__
# define CARB_ATTRIBUTE(...)
# define CARB_MSC_ONLY(...) __VA_ARGS__
# define CARB_NOT_MSC(...)
# define CARB_GNUC_ONLY(...)
# define CARB_NOT_GNUC(...) __VA_ARGS__
# define CARB_PRAGMA(...) __pragma(__VA_ARGS__)
# define CARB_DECLSPEC(...) __declspec(__VA_ARGS__)
# define CARB_PRAGMA_MSC(...) CARB_PRAGMA(__VA_ARGS__)
# define CARB_PRAGMA_GNUC(...)
# ifdef __cpp_exceptions
# define CARB_EXCEPTIONS_ENABLED 1
# else
# define CARB_EXCEPTIONS_ENABLED 0
# endif
// Other MSC-specific definitions that must exist outside of the carb namespace
extern "C" void _mm_prefetch(char const* _A, int _Sel); // From winnt.h/intrin.h
# if defined(__INTELLISENSE__) && _MSC_VER < 1920
// See: https://stackoverflow.com/questions/61485127/including-windows-h-causes-unknown-attributeno-init-all-error
# define no_init_all deprecated
# endif
# elif CARB_COMPILER_GNUC
# define CARB_PRETTY_FUNCTION __PRETTY_FUNCTION__
# define CARB_ATTRIBUTE(...) __attribute__((__VA_ARGS__))
# define CARB_DECLSPEC(...)
# define CARB_MSC_ONLY(...)
# define CARB_NOT_MSC(...) __VA_ARGS__
# define CARB_GNUC_ONLY(...) __VA_ARGS__
# define CARB_NOT_GNUC(...)
# define CARB_PRAGMA(...) _Pragma(__VA_ARGS__)
# define CARB_PRAGMA_MSC(...)
# define CARB_PRAGMA_GNUC(...) CARB_PRAGMA(__VA_ARGS__)
# ifdef __EXCEPTIONS
# define CARB_EXCEPTIONS_ENABLED 1
# else
# define CARB_EXCEPTIONS_ENABLED 0
# endif
# else
# error Unsupported compiler
# endif
#endif
#if defined(DOXYGEN_BUILD) || defined(OMNI_BIND)
//! Turns optimizations off at the function level until a CARB_OPTIMIZE_ON_MSC() call is seen.
//! This must be called outside of the body of any function and will remain in effect until
//! either a CARB_OPTIMIZE_ON_MSC() call is seen or the end of the translation unit. This
//! unfortunately needs to be a separate set of macros versus the one for GCC and Clang due
//! to the different style of disabling and enabling optimizations under the MSC compiler.
# define CARB_OPTIMIZE_OFF_MSC()
//! Restores previous optimizations that were temporarily disabled due to an earlier call to
//! CARB_OPTIMIZE_OFF_MSC(). This must be called outside the body of any function. If this
//! call is not made, the previous optimization state will remain until the end of the current
//! translation unit.
# define CARB_OPTIMIZE_ON_MSC()
//! Disables optimizations for the function that is tagged with this attribute. This only
//! affects the single function that it tags. Optimizations will be restored to the previous
//! settings for the translation unit outside of the tagged function.
# define CARB_NO_OPTIMIZE_GNUC_CLANG()
#else
# if CARB_COMPILER_MSC
# define CARB_OPTIMIZE_OFF_MSC() CARB_PRAGMA_MSC(optimize("", off))
# define CARB_OPTIMIZE_ON_MSC() CARB_PRAGMA_MSC(optimize("", on))
# define CARB_NO_OPTIMIZE_GNUC_CLANG()
# elif CARB_TOOLCHAIN_CLANG
# define CARB_NO_OPTIMIZE_GNUC_CLANG() CARB_ATTRIBUTE(optnone)
# define CARB_OPTIMIZE_OFF_MSC()
# define CARB_OPTIMIZE_ON_MSC()
# elif CARB_COMPILER_GNUC
# define CARB_NO_OPTIMIZE_GNUC_CLANG() CARB_ATTRIBUTE(optimize("-O0"))
# define CARB_OPTIMIZE_OFF_MSC()
# define CARB_OPTIMIZE_ON_MSC()
# else
# error Unsupported compiler
# endif
#endif
// MSC-specific warning macros are defined only for MSC
// CARB_IGNOREWARNING_MSC_PUSH: MSVC only; pushes the warning state
// CARB_IGNOREWARNING_MSC_POP: MSVC only; pops the warning state
// CARB_IGNOREWARNING_MSC(w): MSVC only; disables the given warning number (ex: CARB_IGNOREWARNING_MSC(4505))
// CARB_IGNOREWARNING_MSC_WITH_PUSH(w): MSVC only; combines CARB_IGNOREWARNING_MSC_PUSH and CARB_IGNOREWARNING_MSC()
#if !defined(DOXYGEN_BUILD) && CARB_COMPILER_MSC
# define CARB_IGNOREWARNING_MSC_PUSH __pragma(warning(push))
# define CARB_IGNOREWARNING_MSC_POP __pragma(warning(pop))
# define CARB_IGNOREWARNING_MSC(w) __pragma(warning(disable : w))
# define CARB_IGNOREWARNING_MSC_WITH_PUSH(w) \
CARB_IGNOREWARNING_MSC_PUSH \
CARB_IGNOREWARNING_MSC(w)
#else
//! For MSVC only, pushes the current compilation warning configuration. Defined as `__pragma(warning(push))` for MSVC
//! only; ignored by other compilers.
# define CARB_IGNOREWARNING_MSC_PUSH
//! For MSVC only, pops the compilation warning configuration previously pushed with \ref CARB_IGNOREWARNING_MSC_PUSH,
//! overwriting the current state. Defined as `__pragma(warning(pop))` for MSVC only; ignored by other compilers.
# define CARB_IGNOREWARNING_MSC_POP
//! For MSVC only, disables a specific compiler warning for the current compilation warning configuration. Defined as
//! `__pragma(warning(disable : <w>))` for MSVC only; ignored by other compilers.
//! @param w The warning number to disable.
# define CARB_IGNOREWARNING_MSC(w)
//! Syntactic sugar for \ref CARB_IGNOREWARNING_MSC_PUSH followed by \ref CARB_IGNOREWARNING_MSC.
//! @param w The warning number to disable.
# define CARB_IGNOREWARNING_MSC_WITH_PUSH(w)
#endif
// GNUC-specific helper macros are defined for GCC and Clang-infrastructure
// CARB_IGNOREWARNING_GNUC_PUSH: GCC only; pushes the warning state
// CARB_IGNOREWARNING_GNUC_POP: GCC only; pops the warning state
// CARB_IGNOREWARNING_CLANG_PUSH: Clang only; pushes the warning state
// CARB_IGNOREWARNING_CLANG_POP: Clang only; pops the warning state
// CARB_IGNOREWARNING_GNUC(w): GCC only; disables the given warning (ex: CARB_IGNOREWARNING_GNUC("-Wattributes"))
// CARB_IGNOREWARNING_GNUC_WITH_PUSH(w): GCC only; combines CARB_IGNOREWARNING_GNUC_PUSH and CARB_IGNOREWARNING_GNUC()
// CARB_IGNOREWARNING_CLANG(w): Clang only; disables the given warning (ex: CARB_IGNOREWARNING_CLANG("-Wattributes"))
// CARB_IGNOREWARNING_CLANG_WITH_PUSH(w): Clang only; combines CARB_IGNOREWARNING_CLANG_PUSH and
// CARB_IGNOREWARNING_CLANG()
#if !defined(DOXYGEN_BUILD) && (CARB_COMPILER_GNUC || CARB_TOOLCHAIN_CLANG)
# define CARB_IGNOREWARNING_GNUC_PUSH _Pragma("GCC diagnostic push")
# define CARB_IGNOREWARNING_GNUC_POP _Pragma("GCC diagnostic pop")
# define INTERNAL_CARB_IGNOREWARNING_GNUC(str) _Pragma(# str)
# define CARB_IGNOREWARNING_GNUC(w) INTERNAL_CARB_IGNOREWARNING_GNUC(GCC diagnostic ignored w)
# define CARB_IGNOREWARNING_GNUC_WITH_PUSH(w) CARB_IGNOREWARNING_GNUC_PUSH CARB_IGNOREWARNING_GNUC(w)
# if CARB_TOOLCHAIN_CLANG
# define CARB_IGNOREWARNING_CLANG_PUSH _Pragma("GCC diagnostic push")
# define CARB_IGNOREWARNING_CLANG_POP _Pragma("GCC diagnostic pop")
# define INTERNAL_CARB_IGNOREWARNING_CLANG(str) _Pragma(# str)
# define CARB_IGNOREWARNING_CLANG(w) INTERNAL_CARB_IGNOREWARNING_CLANG(GCC diagnostic ignored w)
# define CARB_IGNOREWARNING_CLANG_WITH_PUSH(w) CARB_IGNOREWARNING_CLANG_PUSH CARB_IGNOREWARNING_CLANG(w)
# else
# define CARB_IGNOREWARNING_CLANG_PUSH
# define CARB_IGNOREWARNING_CLANG_POP
# define CARB_IGNOREWARNING_CLANG(w)
# define CARB_IGNOREWARNING_CLANG_WITH_PUSH(w)
# endif
#else
//! For GCC only, pushes the current compilation warning configuration. Defined as `_Pragma("GCC diagnostic push")` for
//! GCC only; ignored by other compilers.
# define CARB_IGNOREWARNING_GNUC_PUSH
//! For GCC only, pops the compilation warning configuration previously pushed with \ref CARB_IGNOREWARNING_GNUC_PUSH,
//! overwriting the current state. Defined as `_Pragma("GCC diagnostic pop")` for GCC only; ignored by other compilers.
# define CARB_IGNOREWARNING_GNUC_POP
//! For Clang only, pushes the current compilation warning configuration. Defined as `_Pragma("GCC diagnostic push")`
//! for Clang only; ignored by other compilers.
# define CARB_IGNOREWARNING_CLANG_PUSH
//! For Clang only, pops the compilation warning configuration previously pushed with \ref
//! CARB_IGNOREWARNING_CLANG_PUSH, overwriting the current state. Defined as `_Pragma("GCC diagnostic pop")` for Clang
//! only; ignored by other compilers.
# define CARB_IGNOREWARNING_CLANG_POP
//! For GCC only, disables a specific compiler warning for the current compilation warning configuration. Defined as
//! `_Pragma("GCC diagnostic ignored <warning>")` for GCC only; ignored by other compilers.
//! @param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified)
# define CARB_IGNOREWARNING_GNUC(w)
//! Syntactic sugar for \ref CARB_IGNOREWARNING_GNUC_PUSH followed by \ref CARB_IGNOREWARNING_GNUC.
//! @param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified)
# define CARB_IGNOREWARNING_GNUC_WITH_PUSH(w)
//! For Clang only, disables a specific compiler warning for the current compilation warning configuration. Defined as
//! `_Pragma("GCC diagnostic ignored <warning>")` for Clang only; ignored by other compilers.
//! @param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified)
# define CARB_IGNOREWARNING_CLANG(w)
//! Syntactic sugar for \ref CARB_IGNOREWARNING_CLANG_PUSH followed by \ref CARB_IGNOREWARNING_CLANG.
//! @param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified)
# define CARB_IGNOREWARNING_CLANG_WITH_PUSH(w)
#endif
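// Usage sketch: suppressing one warning around a single statement on both compiler
// families (the warning number/name and legacyCall() are hypothetical):
//
//     CARB_IGNOREWARNING_MSC_WITH_PUSH(4996) // MSVC deprecated-declaration warning
//     CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations")
//     legacyCall();
//     CARB_IGNOREWARNING_GNUC_POP
//     CARB_IGNOREWARNING_MSC_POP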
#if defined(__cplusplus) || defined(DOXYGEN_BUILD)
//! Defined as `extern "C"` for C++ compilation, that is, when `__cplusplus` is defined; empty define otherwise.
# define CARB_EXTERN_C extern "C"
#else
# define CARB_EXTERN_C
#endif
//! Grants a function external linkage in a dynamic library or executable.
//!
//! On MSVC, `extern "C" __declspec(dllexport)`. On GCC/Clang: `extern "C" __attribute__((visibility("default")))`.
//!
//! This macro is always defined as such. If conditional import/export is desired, use \ref CARB_DYNAMICLINK.
#define CARB_EXPORT CARB_EXTERN_C CARB_DECLSPEC(dllexport) CARB_ATTRIBUTE(visibility("default"))
//! Imports a function with external linkage from a shared object or DLL.
//!
//! On all compilers: `extern "C"`
//!
//! \note on Windows platforms we do not use `__declspec(dllimport)` as it is <a
//! href="https://learn.microsoft.com/en-us/cpp/build/importing-into-an-application-using-declspec-dllimport?view=msvc-160">optional</a>
//! and can lead to linker warning <a
//! href="https://learn.microsoft.com/en-us/cpp/error-messages/tool-errors/linker-tools-warning-lnk4217?view=msvc-160">LNK4217</a>.
#define CARB_IMPORT CARB_EXTERN_C
// For documentation only
#ifdef DOXYGEN_BUILD
//! Instructs CARB_DYNAMICLINK to export instead of import
//!
//! \warning This symbol is not defined anywhere; it is up to the user of \ref CARB_DYNAMICLINK to define this in the
//! compilation unit that exports the symbols. **This must be defined before carb/Defines.h is included.**
//!
//! \see CARB_DYNAMICLINK
# define CARB_EXPORTS
#endif
#if defined(CARB_EXPORTS) || defined(DOXYGEN_BUILD)
//! Conditional (import/export) dynamic linking.
//!
//! If and only if \ref CARB_EXPORTS is defined before including this file, this will match \ref CARB_EXPORT and
//! function as granting a function external linkage. If `CARB_EXPORTS` is not defined, this functions as merely
//! declaring the function as `extern "C"` so that it can be imported.
# define CARB_DYNAMICLINK CARB_EXPORT
#else
# define CARB_DYNAMICLINK CARB_IMPORT
#endif
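// Usage sketch: a shared library declares its API with CARB_DYNAMICLINK. The one
// module that implements the symbol compiles with CARB_EXPORTS defined (turning the
// declaration into an export); every other module sees a plain extern "C" import
// (myLibraryFunc is a hypothetical symbol):
//
//     CARB_DYNAMICLINK void myLibraryFunc(int value);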
#if CARB_PLATFORM_WINDOWS || defined(DOXYGEN_BUILD)
//! Defined as `__cdecl` on Windows and empty on other platforms. Used to explicitly state the ABI calling convention
//! of API functions.
# define CARB_ABI __cdecl
#else
# define CARB_ABI
#endif
#if (defined(__cplusplus) && __cplusplus >= 201400L) || defined(DOXYGEN_BUILD)
//! Defined as `1` if the current compiler supports C++14; `0` otherwise. C++14 is the minimum required for using
//! Carbonite (though building Carbonite requires C++17).
# define CARB_HAS_CPP14 1
#else
# define CARB_HAS_CPP14 0
#endif
#if (defined(__cplusplus) && __cplusplus >= 201700L) || defined(DOXYGEN_BUILD)
//! Defined as `1` if the current compiler supports C++17; `0` otherwise.
# define CARB_HAS_CPP17 1
#else
# define CARB_HAS_CPP17 0
#endif
#if (defined(__cplusplus) && __cplusplus >= 202000L) || defined(DOXYGEN_BUILD)
//! Defined as `1` if the current compiler supports C++20; `0` otherwise.
# define CARB_HAS_CPP20 1
#else
# define CARB_HAS_CPP20 0
#endif
// [[nodiscard]]
#if CARB_HAS_CPP17 || defined(DOXYGEN_BUILD)
//! Defined as `[[nodiscard]]` if the current compiler supports C++17. This reverts to \c warn_unused_result attribute
//! where it is available and will be empty if it is not.
# define CARB_NODISCARD [[nodiscard]]
//! Defined as `[[nodiscard]]` if the current compiler supports C++17 and is empty otherwise. This operates similar to
//! \c CARB_NODISCARD but is meant to be used on type definitions, as the \c warn_unused_result fallback is not
//! supported for types.
# define CARB_NODISCARD_TYPE [[nodiscard]]
#elif CARB_COMPILER_GNUC
# define CARB_NODISCARD __attribute__((warn_unused_result))
# define CARB_NODISCARD_TYPE
#else // not supported
# define CARB_NODISCARD
# define CARB_NODISCARD_TYPE
#endif
// [[nodiscard(msg)]]
#if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD)
//! Defined as `[[nodiscard(msg)]]` if the current compiler supports C++20; falls back to \c CARB_NODISCARD without the
//! message pre-C++20.
# define CARB_NODISCARD_MSG(msg) [[nodiscard(msg)]]
//! Defined as `[[nodiscard(msg)]]` if the current compiler supports C++20; falls back to \c CARB_NODISCARD_TYPE without
//! the message pre-C++20.
# define CARB_NODISCARD_TYPE_MSG(msg) [[nodiscard(msg)]]
#else
# define CARB_NODISCARD_MSG(msg) CARB_NODISCARD
# define CARB_NODISCARD_TYPE_MSG(msg) CARB_NODISCARD_TYPE
#endif
// [[fallthrough]]
#if CARB_HAS_CPP17 || defined(DOXYGEN_BUILD)
//! Defined as `[[fallthrough]]` if the current compiler supports C++17; empty otherwise.
# define CARB_FALLTHROUGH [[fallthrough]]
#elif CARB_COMPILER_GNUC
# if __GNUC__ >= 7
# define CARB_FALLTHROUGH __attribute__((fallthrough))
# else
// Marker comment
# define CARB_FALLTHROUGH /* fall through */
# endif
#else // not supported
# define CARB_FALLTHROUGH
#endif
// [[maybe_unused]]
#if CARB_HAS_CPP17 && !defined(DOXYGEN_BUILD)
# define CARB_MAYBE_UNUSED [[maybe_unused]]
# define CARB_CPP17_CONSTEXPR constexpr
#elif CARB_COMPILER_GNUC && !defined(DOXYGEN_BUILD)
# define CARB_MAYBE_UNUSED __attribute__((unused))
# define CARB_CPP17_CONSTEXPR
#else // not supported
//! Defined as `[[maybe_unused]]` if the current compiler supports C++17; empty otherwise.
# define CARB_MAYBE_UNUSED
//! Defined as `constexpr` if the current compiler supports C++17; empty otherwise.
# define CARB_CPP17_CONSTEXPR
#endif
// [[likely]] / [[unlikely]]
#if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD)
//! Defined as `([[likely]] !!(<expr>))` if the current compiler supports C++20. If the current compiler is GCC, as a
//! fallback, `__builtin_expect(!!(<expr>), 1)` will be used. Otherwise, defined as `(!!(<expr>))`
//! @param expr The expression to evaluate, optimized with a `true` outcome likely and expected.
//! @returns The boolean result of \p expr.
# define CARB_LIKELY(expr) ([[likely]] !!(expr))
//! Defined as `([[unlikely]] !!(<expr>))` if the current compiler supports C++20. If the current compiler is GCC, as a
//! fallback, `__builtin_expect(!!(<expr>), 0)` will be used. Otherwise, defined as `(!!(<expr>))`
//! @param expr The expression to evaluate, optimized with a `false` outcome likely and expected.
//! @returns The boolean result of \p expr.
# define CARB_UNLIKELY(expr) ([[unlikely]] !!(expr))
#elif CARB_COMPILER_GNUC
# define CARB_LIKELY(expr) __builtin_expect(!!(expr), 1)
# define CARB_UNLIKELY(expr) __builtin_expect(!!(expr), 0)
#else // not supported
# define CARB_LIKELY(expr) (!!(expr))
# define CARB_UNLIKELY(expr) (!!(expr))
#endif
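// Usage sketch: annotate branches whose outcome is strongly biased; the error path
// below is expected to be rare (handleError() is hypothetical):
//
//     if (CARB_UNLIKELY(buffer == nullptr))
//         handleError();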
// [[no_unique_address]]
#if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD)
//! Defined as `[[no_unique_address]]` if the current compiler supports C++20; empty otherwise.
# define CARB_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else // not supported
# define CARB_NO_UNIQUE_ADDRESS
#endif
//! Syntactic sugar for `CARB_ATTRIBUTE(visibility("hidden"))`; ignored on compilers other than GCC.
#define CARB_HIDDEN CARB_ATTRIBUTE(visibility("hidden"))
//! Syntactic sugar for `CARB_DECLSPEC(selectany) CARB_ATTRIBUTE(weak)`, used to enable weak linking.
#define CARB_WEAKLINK CARB_DECLSPEC(selectany) CARB_ATTRIBUTE(weak)
// constexpr in CPP20, but not before
#if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD)
//! Defined as `constexpr` if the current compiler supports C++20; empty otherwise.
# define CARB_CPP20_CONSTEXPR constexpr
#else
# define CARB_CPP20_CONSTEXPR
#endif
// include the IAssert interface here. Note that this cannot be included any earlier because
// it requires symbols such as "CARB_ABI". Also note that it cannot be put into the CARB_DEBUG
// section below because the mirroring tool picks it up and generates type information for it.
// If it is not unconditionally included here, that leads to build errors in release builds.
#include "assert/IAssert.h"
#ifdef DOXYGEN_BUILD
//! On Windows, defined as `__debugbreak()`; on POSIX platforms (Linux, Mac OS), `raise(SIGTRAP)`. Used to break into the debugger.
# define CARB_BREAK_POINT()
#elif CARB_POSIX
# define CARB_BREAK_POINT() ::raise(SIGTRAP)
#elif CARB_PLATFORM_WINDOWS
# define CARB_BREAK_POINT() ::__debugbreak()
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
namespace carb
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
// clang-format off
#define C(a) (unsigned char)(0x##a)
constexpr unsigned char lowerTable[256] = {
C(00), C(01), C(02), C(03), C(04), C(05), C(06), C(07), C(08), C(09), C(0A), C(0B), C(0C), C(0D), C(0E), C(0F),
C(10), C(11), C(12), C(13), C(14), C(15), C(16), C(17), C(18), C(19), C(1A), C(1B), C(1C), C(1D), C(1E), C(1F),
C(20), C(21), C(22), C(23), C(24), C(25), C(26), C(27), C(28), C(29), C(2A), C(2B), C(2C), C(2D), C(2E), C(2F),
C(30), C(31), C(32), C(33), C(34), C(35), C(36), C(37), C(38), C(39), C(3A), C(3B), C(3C), C(3D), C(3E), C(3F),
C(40),
// [0x41, 0x5A] -> [0x61, 0x7A]
C(61), C(62), C(63), C(64), C(65), C(66), C(67), C(68), C(69), C(6A), C(6B), C(6C), C(6D), C(6E), C(6F),
C(70), C(71), C(72), C(73), C(74), C(75), C(76), C(77), C(78), C(79), C(7A),
C(5B), C(5C), C(5D), C(5E), C(5F),
C(60), C(61), C(62), C(63), C(64), C(65), C(66), C(67), C(68), C(69), C(6A), C(6B), C(6C), C(6D), C(6E), C(6F),
C(70), C(71), C(72), C(73), C(74), C(75), C(76), C(77), C(78), C(79), C(7A), C(7B), C(7C), C(7D), C(7E), C(7F),
C(80), C(81), C(82), C(83), C(84), C(85), C(86), C(87), C(88), C(89), C(8A), C(8B), C(8C), C(8D), C(8E), C(8F),
C(90), C(91), C(92), C(93), C(94), C(95), C(96), C(97), C(98), C(99), C(9A), C(9B), C(9C), C(9D), C(9E), C(9F),
C(A0), C(A1), C(A2), C(A3), C(A4), C(A5), C(A6), C(A7), C(A8), C(A9), C(AA), C(AB), C(AC), C(AD), C(AE), C(AF),
C(B0), C(B1), C(B2), C(B3), C(B4), C(B5), C(B6), C(B7), C(B8), C(B9), C(BA), C(BB), C(BC), C(BD), C(BE), C(BF),
C(C0), C(C1), C(C2), C(C3), C(C4), C(C5), C(C6), C(C7), C(C8), C(C9), C(CA), C(CB), C(CC), C(CD), C(CE), C(CF),
C(D0), C(D1), C(D2), C(D3), C(D4), C(D5), C(D6), C(D7), C(D8), C(D9), C(DA), C(DB), C(DC), C(DD), C(DE), C(DF),
C(E0), C(E1), C(E2), C(E3), C(E4), C(E5), C(E6), C(E7), C(E8), C(E9), C(EA), C(EB), C(EC), C(ED), C(EE), C(EF),
C(F0), C(F1), C(F2), C(F3), C(F4), C(F5), C(F6), C(F7), C(F8), C(F9), C(FA), C(FB), C(FC), C(FD), C(FE), C(FF),
};
constexpr unsigned char upperTable[256] = {
C(00), C(01), C(02), C(03), C(04), C(05), C(06), C(07), C(08), C(09), C(0A), C(0B), C(0C), C(0D), C(0E), C(0F),
C(10), C(11), C(12), C(13), C(14), C(15), C(16), C(17), C(18), C(19), C(1A), C(1B), C(1C), C(1D), C(1E), C(1F),
C(20), C(21), C(22), C(23), C(24), C(25), C(26), C(27), C(28), C(29), C(2A), C(2B), C(2C), C(2D), C(2E), C(2F),
C(30), C(31), C(32), C(33), C(34), C(35), C(36), C(37), C(38), C(39), C(3A), C(3B), C(3C), C(3D), C(3E), C(3F),
C(40), C(41), C(42), C(43), C(44), C(45), C(46), C(47), C(48), C(49), C(4A), C(4B), C(4C), C(4D), C(4E), C(4F),
C(50), C(51), C(52), C(53), C(54), C(55), C(56), C(57), C(58), C(59), C(5A), C(5B), C(5C), C(5D), C(5E), C(5F),
C(60),
// [0x61, 0x7A] -> [0x41, 0x5A]
C(41), C(42), C(43), C(44), C(45), C(46), C(47), C(48), C(49), C(4A), C(4B), C(4C), C(4D), C(4E), C(4F),
C(50), C(51), C(52), C(53), C(54), C(55), C(56), C(57), C(58), C(59), C(5A),
C(7B), C(7C), C(7D), C(7E), C(7F),
C(80), C(81), C(82), C(83), C(84), C(85), C(86), C(87), C(88), C(89), C(8A), C(8B), C(8C), C(8D), C(8E), C(8F),
C(90), C(91), C(92), C(93), C(94), C(95), C(96), C(97), C(98), C(99), C(9A), C(9B), C(9C), C(9D), C(9E), C(9F),
C(A0), C(A1), C(A2), C(A3), C(A4), C(A5), C(A6), C(A7), C(A8), C(A9), C(AA), C(AB), C(AC), C(AD), C(AE), C(AF),
C(B0), C(B1), C(B2), C(B3), C(B4), C(B5), C(B6), C(B7), C(B8), C(B9), C(BA), C(BB), C(BC), C(BD), C(BE), C(BF),
C(C0), C(C1), C(C2), C(C3), C(C4), C(C5), C(C6), C(C7), C(C8), C(C9), C(CA), C(CB), C(CC), C(CD), C(CE), C(CF),
C(D0), C(D1), C(D2), C(D3), C(D4), C(D5), C(D6), C(D7), C(D8), C(D9), C(DA), C(DB), C(DC), C(DD), C(DE), C(DF),
C(E0), C(E1), C(E2), C(E3), C(E4), C(E5), C(E6), C(E7), C(E8), C(E9), C(EA), C(EB), C(EC), C(ED), C(EE), C(EF),
C(F0), C(F1), C(F2), C(F3), C(F4), C(F5), C(F6), C(F7), C(F8), C(F9), C(FA), C(FB), C(FC), C(FD), C(FE), C(FF),
};
#undef C
// clang-format on
} // namespace detail
#endif
/**
* Assertion handler helper function. Do not call directly. Used by CARB_CHECK and CARB_ASSERT if the
* `IAssert` interface is not available (i.e. the Framework is not instantiated). This function prints an "Assertion
* failed" message to `stderr` by default.
*
* @param condition The condition from an assert in progress.
* @param file The source file location from an assert in progress.
* @param func The source file function name from an assert in progress.
* @param line The source file line from an assert in progress.
* @param fmt A `printf`-style format specifier string for the assert in progress.
* @param ... Arguments corresponding to format specifiers in \p fmt.
* @returns \c true if the software breakpoint should be triggered; \c false if a software breakpoint should be skipped.
*/
inline bool assertHandlerFallback(
const char* condition, const char* file, const char* func, int32_t line, const char* fmt = nullptr, ...)
{
static std::mutex m;
std::lock_guard<std::mutex> g(m);
if (fmt != nullptr)
{
fprintf(stderr, "%s:%s():%" PRId32 ": Assertion (%s) failed: ", file, func, line, condition);
va_list args;
va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fputc('\n', stderr);
}
else
fprintf(stderr, "%s:%" PRId32 ":%s(): Assertion (%s) failed.\n", file, line, func, condition);
return true;
}
} // namespace carb
#ifdef DOXYGEN_BUILD
//! Indicates whether asserts are enabled. May be overridden by defining this before including this file. By default, is
//! set to `1` if `CARB_DEBUG` is non-zero. If this is overridden to a non-zero value and `CARB_ASSERT` is not defined,
//! `CARB_ASSERT` will receive the default implementation.
# define CARB_ASSERT_ENABLED 0
//! Indicates whether runtime checking is enabled. May be overridden by defining this before including this file. By
//! default, is set to `1` always. If this is overridden to a non-zero value and `CARB_CHECK` is not defined,
//! `CARB_CHECK` will receive the default implementation.
# define CARB_CHECK_ENABLED 0
//! Optionally performs an assertion, by default for debug builds only.
//! @warning The \p cond should have no side effects! Asserts can be disabled which will cause \p cond to not be
//! evaluated.
//! @note The \ref CARB_ASSERT_ENABLED define can be used to determine if asserts are enabled, or to cause them to be
//! enabled or disabled by defining it before including this file.
//!
//! The implementation can be overridden on the command line, or by defining to a different implementation before
//! including this file.
//!
//! When \p cond produces a `false` result, the failure is reported to the `g_carbAssert` assertion handler, or if that
//! global variable is `nullptr`, calls \ref carb::assertHandlerFallback(). Depending on the result from that function
//! call, execution is allowed to continue, or `CARB_BREAK_POINT()` is invoked to notify the debugger.
//! @param cond A condition that is evaluated for a boolean result. If the condition produces \c false, the assert
//! handler is notified.
//! @param ... An optional printf-style format string and variadic parameters.
# define CARB_ASSERT(cond, ...) ((void)0)
//! Optionally performs a runtime check assertion, by default for both debug and release builds.
//! @warning The \p cond should have no side effects! Asserts can be disabled which will cause \p cond to not be
//! evaluated.
//! @note The \ref CARB_CHECK_ENABLED define can be used to determine if runtime check asserts are enabled, or to cause
//! them to be enabled or disabled by defining it before including this file.
//!
//! The implementation can be overridden on the command line, or by defining to a different implementation before
//! including this file.
//!
//! When \p cond produces a `false` result, the failure is reported to the `g_carbAssert` assertion handler, or if that
//! global variable is `nullptr`, calls \ref carb::assertHandlerFallback(). Depending on the result from that function
//! call, execution is allowed to continue, or `CARB_BREAK_POINT()` is invoked to notify the debugger.
//! @param cond A condition that is evaluated for a boolean result. If the condition produces \c false, the assert
//! handler is notified.
//! @param ... An optional printf-style format string and variadic parameters.
# define CARB_CHECK(cond, ...) ((void)0)
//! Terminates the application if a check fails.
//!
//! The implementation can be overridden on the command line, or by defining to a different implementation before
//! including this file.
//!
//! @warning The application is malformed and undefined behavior occurs if an overriding implementation of
//! `CARB_FATAL_UNLESS` allows continuing when \p cond returns false.
//! @param cond A condition that is evaluated for a boolean result. If the condition produces \c false, the assert
//! handler is notified. If the assert handler returns, `std::terminate()` is called.
//! @param fmt An explanation of the failure is required. This is a printf-style format string.
//! @param ... printf-style variadic parameters
# define CARB_FATAL_UNLESS(cond, fmt, ...) (!(cond) ? (std::terminate(), false) : true)
#else
/* main assertion test entry point. This is implemented as a single conditional statement to
* ensure that the assertion failure breakpoint occurs on the same line of code as the assertion
* test itself. CARB_CHECK() exists in release and debug, and CARB_ASSERT() is debug-only.
*/
// example-begin CARB_IMPL_ASSERT
# define CARB_IMPL_ASSERT(cond, ...) \
(CARB_LIKELY(cond) || \
 ([&](const char* funcname__, ...) CARB_NOINLINE { \
return g_carbAssert ? \
g_carbAssert->reportFailedAssertion(#cond, __FILE__, funcname__, __LINE__, ##__VA_ARGS__) : \
::carb::assertHandlerFallback(#cond, __FILE__, funcname__, __LINE__, ##__VA_ARGS__); \
}(CARB_PRETTY_FUNCTION)) || \
(CARB_BREAK_POINT(), false))
// example-end CARB_IMPL_ASSERT
# ifndef CARB_CHECK
# ifndef CARB_CHECK_ENABLED
# define CARB_CHECK_ENABLED 1
# endif
# if CARB_CHECK_ENABLED
# define CARB_CHECK(cond, ...) CARB_IMPL_ASSERT(cond, ##__VA_ARGS__)
# else
# define CARB_CHECK(cond, ...) ((void)0)
# endif
# else
// CARB_CHECK was already defined
# ifndef CARB_CHECK_ENABLED
# define CARB_CHECK /* cause an error showing where it was already defined */
# error CARB_CHECK_ENABLED must also be defined if CARB_CHECK is pre-defined!
# endif
# endif
# ifndef CARB_FATAL_UNLESS
// example-begin CARB_FATAL_UNLESS
# define CARB_FATAL_UNLESS(cond, fmt, ...) \
(CARB_LIKELY(cond) || \
([&](const char* funcname__, ...) CARB_NOINLINE { \
if (false) \
::printf(fmt, ##__VA_ARGS__); \
g_carbAssert ? g_carbAssert->reportFailedAssertion(#cond, __FILE__, funcname__, __LINE__, fmt, ##__VA_ARGS__) : \
::carb::assertHandlerFallback(#cond, __FILE__, funcname__, __LINE__, fmt, ##__VA_ARGS__); \
}(CARB_PRETTY_FUNCTION), std::terminate(), false))
// example-end CARB_FATAL_UNLESS
# endif
# ifndef CARB_ASSERT
# ifndef CARB_ASSERT_ENABLED
# if CARB_DEBUG
# define CARB_ASSERT_ENABLED 1
# else
# define CARB_ASSERT_ENABLED 0
# endif
# endif
# if CARB_ASSERT_ENABLED
# define CARB_ASSERT(cond, ...) CARB_IMPL_ASSERT(cond, ##__VA_ARGS__)
# else
# define CARB_ASSERT(cond, ...) ((void)0)
# endif
# else
// CARB_ASSERT was already defined
# ifndef CARB_ASSERT_ENABLED
# define CARB_ASSERT /* cause an error showing where it was already defined */
# error CARB_ASSERT_ENABLED must also be defined if CARB_ASSERT is pre-defined!
# endif
# endif
#endif
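// Usage sketch: the three flavors side by side (Buffer and kMaxSize are
// hypothetical). CARB_ASSERT is debug-only by default, CARB_CHECK also runs in
// release builds, and CARB_FATAL_UNLESS terminates if the condition fails:
//
//     void resize(Buffer& b, size_t n)
//     {
//         CARB_ASSERT(b.data != nullptr);
//         CARB_CHECK(n != 0, "resizing %p to zero elements", (void*)b.data);
//         CARB_FATAL_UNLESS(n <= kMaxSize, "requested %zu > kMaxSize", n);
//     }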
//! A helper to determine if the size and alignment of two given structures match, causing a static assert if unmatched.
//! @param A One type to compare.
//! @param B Another type to compare.
#define CARB_ASSERT_STRUCTS_MATCH(A, B) \
static_assert( \
sizeof(A) == sizeof(B) && alignof(A) == alignof(B), "Size or alignment mismatch between " #A " and " #B ".")
//! A helper to determine if member `A.a` matches the offset and size of `B.b`, causing a static assert if unmatched.
//! @param A The struct containing public member \p a.
//! @param a A public member of \p A.
//! @param B The struct containing public member \p b.
//! @param b A public member of \p B.
#define CARB_ASSERT_MEMBERS_MATCH(A, a, B, b) \
static_assert(offsetof(A, a) == offsetof(B, b) && sizeof(A::a) == sizeof(B::b), \
"Offset or size mismatch between members " #a " of " #A " and " #b " of " #B ".")
//! The maximum value that can be represented by `uint16_t`.
#define CARB_UINT16_MAX UINT16_MAX
//! The maximum value that can be represented by `uint32_t`.
#define CARB_UINT32_MAX UINT32_MAX
//! The maximum value that can be represented by `uint64_t`.
#define CARB_UINT64_MAX UINT64_MAX
//! The maximum value that can be represented by `unsigned long long`.
#define CARB_ULLONG_MAX ULLONG_MAX
//! The maximum value that can be represented by `unsigned short`.
#define CARB_USHRT_MAX USHRT_MAX
//! The maximum value that can be represented by `float`.
#define CARB_FLOAT_MAX 3.402823466e+38F
//! A macro that returns the least of two values.
//! @warning This macro will evaluate parameters more than once! Consider using carb_min() or `std::min`.
//! @param a The first value.
//! @param b The second value.
//! @returns The least of \p a or \p b. If the values are equal \p b will be returned.
#define CARB_MIN(a, b) (((a) < (b)) ? (a) : (b))
//! A macro that returns the largest of two values.
//! @warning This macro will evaluate parameters more than once! Consider using carb_max() or `std::max`.
//! @param a The first value.
//! @param b The second value.
//! @returns The largest of \p a or \p b. If the values are equal \p b will be returned.
#define CARB_MAX(a, b) (((a) > (b)) ? (a) : (b))
//! A macro that clamps a value to the range [lo, hi].
//! @warning This macro will evaluate parameters more than once! Consider using `std::clamp` or an inline function
//! instead.
//! @param x The value to clamp.
//! @param lo The lowest acceptable value. This will be returned if `x < lo`.
//! @param hi The highest acceptable value. This will be returned if `x > hi`.
//! @return \p lo if \p x is less than \p lo; \p hi if \p x is greater than \p hi; \p x otherwise.
#define CARB_CLAMP(x, lo, hi) (((x) < (lo)) ? (lo) : (((x) > (hi)) ? (hi) : (x)))
//! Rounds a given value to the next highest multiple of another given value.
//! @warning This macro will evaluate the \p to parameter more than once! Consider using an inline function instead.
//! @param value The value to round.
//! @param to The multiple to round to.
//! @returns \p value rounded up to the next multiple of \p to.
#define CARB_ROUNDUP(value, to) ((((value) + (to)-1) / (to)) * (to))
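// Pitfall sketch: macro arguments are evaluated more than once, so side effects repeat:
//
//     int i = 0;
//     int m = CARB_MIN(i++, 9); // expands to (((i++) < (9)) ? (i++) : (9)); m == 1 and i == 2
//     size_t padded = CARB_ROUNDUP(1000, 256); // 1024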
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// CARB_JOIN will join together `a` and `b` and also work properly if either parameter is another macro like __LINE__.
// This requires two macros since the preprocessor will only recurse macro expansion if # and ## are not present.
# define __CARB_JOIN(a, b) a##b
#endif
//! A macro that joins two parts to create one symbol allowing one or more parameters to be a macro, as if by the `##`
//! preprocessor operator.
//! Example: `CARB_JOIN(test, __LINE__)` on line 579 produces `test579`.
//! @param a The first name to join.
//! @param b The second name to join.
#define CARB_JOIN(a, b) __CARB_JOIN(a, b)
//! A macro that deletes the copy-construct and copy-assign functions for the given classname.
//! @param classname The class to delete copy functions for.
#define CARB_PREVENT_COPY(classname) \
classname(const classname&) = delete; /**< @private */ \
classname& operator=(const classname&) = delete /**< @private */
//! A macro that deletes the move-construct and move-assign functions for the given classname.
//! @param classname The class to delete move functions for.
#define CARB_PREVENT_MOVE(classname) \
classname(classname&&) = delete; /**< @private */ \
classname& operator=(classname&&) = delete /**< @private */
//! Syntactic sugar for both \ref CARB_PREVENT_COPY and \ref CARB_PREVENT_MOVE.
//! @param classname The class to delete copy and move functions for.
#define CARB_PREVENT_COPY_AND_MOVE(classname) \
CARB_PREVENT_COPY(classname); \
CARB_PREVENT_MOVE(classname)
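// Usage sketch (Session is a hypothetical class that must not be duplicated):
//
//     class Session
//     {
//         CARB_PREVENT_COPY_AND_MOVE(Session);
//     public:
//         Session() = default;
//     };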
#if defined(__COUNTER__) || defined(DOXYGEN_BUILD)
//! A helper macro that appends a number to the given name to create a unique name.
//! @param str The name to decorate.
# define CARB_ANONYMOUS_VAR(str) CARB_JOIN(str, __COUNTER__)
#else
# define CARB_ANONYMOUS_VAR(str) CARB_JOIN(str, __LINE__)
#endif
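// Usage sketch: CARB_ANONYMOUS_VAR generates a unique identifier per expansion, so a
// macro can introduce locals without name clashes (g_mutex is hypothetical):
//
//     #define SCOPED_LOCK() std::lock_guard<std::mutex> CARB_ANONYMOUS_VAR(lock_)(g_mutex)
//     SCOPED_LOCK(); // e.g. expands to: std::lock_guard<std::mutex> lock_0(g_mutex);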
namespace carb
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template <typename T, size_t N>
constexpr size_t countOf(T const (&)[N])
{
return N;
}
#endif
//! Returns the count of an array as a `size_t` at compile time.
//! @param a The array to count.
//! @returns The number of elements in \p a.
#define CARB_COUNTOF(a) carb::countOf(a)
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template <typename T, uint32_t N>
constexpr uint32_t countOf32(T const (&)[N])
{
return N;
}
#endif
//! Returns the count of an array as a `uint32_t` at compile time.
//! @param a The array to count.
//! @returns The number of elements in \p a.
#define CARB_COUNTOF32(a) carb::countOf32(a)
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template <typename T, typename U>
constexpr uint32_t offsetOf(U T::*member)
{
CARB_IGNOREWARNING_GNUC_PUSH
# if CARB_TOOLCHAIN_CLANG && __clang_major__ >= 13 // this error is issued on clang 13
CARB_IGNOREWARNING_GNUC("-Wnull-pointer-subtraction")
# endif
return (uint32_t)((char*)&((T*)nullptr->*member) - (char*)nullptr);
CARB_IGNOREWARNING_GNUC_POP
}
#endif
//! Returns the offset of a member of a class at compile time.
//! @param a The member of a class; it must be accessible at the point of the CARB_OFFSETOF() call. The class is
//! inferred.
//! @returns The offset of \p a from its containing class, in bytes, as a `uint32_t`.
#define CARB_OFFSETOF(a) carb::offsetOf(&a)
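// Usage sketch (S is a hypothetical struct):
//
//     static const int kTable[] = { 1, 2, 3 };
//     static_assert(CARB_COUNTOF(kTable) == 3, "unexpected table size");
//
//     struct S { int a; float b; };
//     uint32_t off = CARB_OFFSETOF(S::b); // byte offset of member b within S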
#if CARB_COMPILER_MSC || defined(DOXYGEN_BUILD)
//! Returns the required alignment of a type.
//! @param T The type to determine alignment of.
//! @returns The required alignment of \p T, in bytes.
# define CARB_ALIGN_OF(T) __alignof(T)
#elif CARB_COMPILER_GNUC
# define CARB_ALIGN_OF(T) __alignof__(T)
#else
# error "Align of cannot be determined - compiler not known"
#endif
// Implement CARB_HARDWARE_PAUSE; a way of idling the pipelines and reducing the penalty
// from memory order violations. See
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors
#ifdef DOXYGEN_BUILD
//! Instructs the underlying hardware to idle the CPU pipelines and reduce the penalty from memory order violations.
# define CARB_HARDWARE_PAUSE()
#elif CARB_X86_64
// avoid including immintrin.h
# if CARB_COMPILER_MSC
# pragma intrinsic(_mm_pause)
# define CARB_HARDWARE_PAUSE() _mm_pause()
# else
# define CARB_HARDWARE_PAUSE() __builtin_ia32_pause()
# endif
#elif defined(__aarch64__)
# define CARB_HARDWARE_PAUSE() __asm__ __volatile__("yield" ::: "memory")
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
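// Usage sketch: a polite spin-wait (`ready` is a hypothetical std::atomic<bool>):
//
//     while (!ready.load(std::memory_order_acquire))
//         CARB_HARDWARE_PAUSE();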
#if CARB_COMPILER_MSC || defined(DOXYGEN_BUILD)
# pragma intrinsic(_mm_prefetch)
//! Instructs the compiler to force inline of the decorated function
# define CARB_ALWAYS_INLINE __forceinline
//! Attempts to prefetch from memory using a compiler intrinsic.
//! @param addr The address to prefetch
//! @param write Pass `true` if writing to the address is intended; `false` otherwise.
//! @param level The `carb::PrefetchLevel` hint.
# define CARB_PREFETCH(addr, write, level) _mm_prefetch(reinterpret_cast<char*>(addr), int(level))
//! A prefetch level hint to pass to \ref CARB_PREFETCH()
enum class PrefetchLevel
{
kHintNonTemporal = 0, //!< prefetch data into non-temporal cache structure and into a location close to the
//!< processor, minimizing cache pollution.
kHintL1 = 1, //!< prefetch data into all levels of the cache hierarchy.
kHintL2 = 2, //!< prefetch data into level 2 cache and higher.
kHintL3 = 3, //!< prefetch data into level 3 cache and higher, or an implementation specific choice.
};
#elif CARB_COMPILER_GNUC
# define CARB_ALWAYS_INLINE CARB_ATTRIBUTE(always_inline)
# define CARB_PREFETCH(addr, write, level) __builtin_prefetch((addr), (write), int(level))
enum class PrefetchLevel
{
kHintNonTemporal = 0,
kHintL1 = 3,
kHintL2 = 2,
kHintL3 = 1,
};
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
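// Usage sketch: prefetch the next element for reading while the current one is
// processed (items, count and process() are hypothetical):
//
//     for (size_t i = 0; i < count; ++i)
//     {
//         if (i + 1 < count)
//             CARB_PREFETCH(&items[i + 1], false, carb::PrefetchLevel::kHintL1);
//         process(items[i]);
//     }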
//! A macro that declares that a function may not be inlined.
#define CARB_NOINLINE CARB_ATTRIBUTE(noinline) CARB_DECLSPEC(noinline)
#ifdef DOXYGEN_BUILD
//! Declares a function as deprecated.
# define CARB_DEPRECATED(msg)
//! Declares a file as deprecated.
# define CARB_FILE_DEPRECATED
//! Declares that a function will not throw any exceptions
# define CARB_NOEXCEPT throw()
//! Used when declaring opaque types to prevent Doxygen from getting confused about not finding any implementation.
# define DOXYGEN_EMPTY_CLASS \
{ \
}
#else
# define CARB_DEPRECATED(msg) CARB_ATTRIBUTE(deprecated(msg)) CARB_DECLSPEC(deprecated(msg))
# ifdef CARB_IGNORE_REMOVEFILE_WARNINGS
# define CARB_FILE_DEPRECATED
# define CARB_FILE_DEPRECATED_MSG(...)
# else
# define CARB_FILE_DEPRECATED_MSG(msg) \
CARB_PRAGMA(message("\x1b[33m" __FILE__ ":" CARB_STRINGIFY( \
__LINE__) ": " msg " (#define CARB_IGNORE_REMOVEFILE_WARNINGS to ignore these warnings)\x1b[0m")) \
CARB_PRAGMA(warning_see_message)
# define CARB_FILE_DEPRECATED CARB_FILE_DEPRECATED_MSG("This file is no longer needed and will be removed soon")
# endif
# define CARB_NOEXCEPT noexcept
# define DOXYGEN_EMPTY_CLASS
#endif
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template <typename T>
constexpr T align(T x, size_t alignment)
{
return (T)(((size_t)x + alignment - 1) / alignment * alignment);
}
template <typename T>
T* align(T* x, size_t alignment)
{
return (T*)(((size_t)x + alignment - 1) / alignment * alignment);
}
#endif
//! Aligns a number or pointer to the next multiple of a provided alignment.
//! @note The alignment need not be a power-of-two.
//! @param x The pointer or value to align
//! @param alignment The alignment value in bytes.
//! @returns If \p x is already aligned to \p alignment, returns \p x; otherwise returns \p x rounded up to the next
//! multiple of \p alignment.
#define CARB_ALIGN(x, alignment) carb::align(x, alignment)
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template <typename T>
constexpr T alignedSize(const T& size, uint32_t alignment)
{
return ((size + alignment - 1) / alignment) * alignment;
}
#endif
//! Aligns a size to the given alignment.
//! @note The alignment need not be a power-of-two.
//! @param size The size to align.
//! @param alignment The alignment value in bytes.
//! @returns If \p size is already aligned to \p alignment, returns \p size; otherwise returns \p size rounded up to the
//! next multiple of \p alignment.
#define CARB_ALIGNED_SIZE(size, alignment) carb::alignedSize(size, alignment)
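// Usage sketch: note that, unlike many alignment helpers, the alignment need not be
// a power of two (rawBytes is a hypothetical char*):
//
//     size_t s = CARB_ALIGNED_SIZE(sizeof(uint32_t), 24); // 4 -> 24
//     char* p = CARB_ALIGN(rawBytes, 64);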
//! Defined as `alignas(T)`.
#define CARB_ALIGN_AS(T) alignas(T)
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template <typename T>
constexpr T divideCeil(T size, uint32_t divisor)
{
static_assert(std::is_integral<T>::value, "Integral required.");
return (size + divisor - 1) / divisor;
}
#endif
/**
 * Divides size by divisor and returns the smallest integer greater than or equal to the quotient (the division
 * result rounded up), e.g. for calculating the number of thread groups that cover all threads in a compute dispatch.
* @param size An integer value.
* @param divisor The divisor value.
* @returns `size / divisor`, rounded up to the nearest whole integer. The type is based on \p size.
*/
#define CARB_DIVIDE_CEIL(size, divisor) carb::divideCeil(size, divisor)
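// Usage sketch: computing a compute-dispatch group count:
//
//     uint32_t groups = CARB_DIVIDE_CEIL(1000u, 256); // 4 groups cover 1000 threads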
#if (CARB_HAS_CPP17 && defined(__cpp_lib_hardware_interference_size)) || defined(DOXYGEN_BUILD)
//! Minimum offset between two objects to avoid false sharing, i.e. cache line size. If C++17 is not supported, falls
//! back to the default value of 64 bytes.
# define CARB_CACHELINE_SIZE (std::hardware_destructive_interference_size)
#else
# define CARB_CACHELINE_SIZE (64)
#endif
//! Defined as `CARB_ALIGN_AS(CARB_CACHELINE_SIZE)`.
#define CARB_CACHELINE_ALIGN CARB_ALIGN_AS(CARB_CACHELINE_SIZE)
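// Usage sketch: keeping per-thread counters on separate cache lines to avoid false
// sharing (Shard is a hypothetical struct):
//
//     struct CARB_CACHELINE_ALIGN Shard { std::atomic<uint64_t> counter; };
//     Shard shards[4]; // each shard starts on its own cache line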
/** This is a wrapper for the platform-specific call to the non-standard but almost universal alloca() function. */
#if CARB_PLATFORM_WINDOWS
# define CARB_ALLOCA(size) _alloca(size)
#elif CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS
# define CARB_ALLOCA(size) alloca(size)
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
//! Attempts to allocate an array of the given type on the stack.
//! @warning On Windows, the underlying call to `_alloca()` may throw a SEH stack overflow exception if the stack does
//! not have sufficient space to perform the allocation. However, on Linux, there is no error handling for the
//! underlying `alloca()` call. The caller is advised to use caution.
//! @note The memory allocated is within the stack frame of the current function and is automatically freed when the
//! function returns or `longjmp()` or `siglongjmp()` is called. The memory is \a not freed when leaving the scope that
//! allocates it, except by the methods mentioned.
//! @param T The type of the object(s) to allocate.
//! @param number The number of objects to allocate. If `0`, a `nullptr` is returned.
//! @returns A properly-aligned pointer that will fit \p number quantity of type \p T on the stack. This memory will be
//! freed automatically when the function returns or `longjmp()` or `siglongjmp()` is called.
#define CARB_STACK_ALLOC(T, number) \
carb::align<T>(((number) ? (T*)CARB_ALLOCA((number) * sizeof(T) + alignof(T)) : nullptr), alignof(T))
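// Usage sketch: a small scratch array that lives only for the current call (`count`
// is hypothetical and should be small and bounded):
//
//     float* scratch = CARB_STACK_ALLOC(float, count);
//     if (scratch != nullptr)
//     {
//         // use scratch[0 .. count-1]; freed automatically on return
//     }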
//! Allocates memory from the heap.
//! @rst
//! .. deprecated:: 126.0
//! Please use `carb::allocate()` instead.
//! @endrst
//! @warning Memory allocated from this method must be freed within the same module that allocated it.
//! @param size The number of bytes to allocate.
//! @returns A valid pointer to a memory region of \p size bytes. If an error occurs, `nullptr` is returned.
#define CARB_MALLOC(size) std::malloc(size)
//! Frees memory previously allocated using CARB_MALLOC().
//! @rst
//! .. deprecated:: 126.0
//! Please use `carb::deallocate()` instead.
//! @endrst
//! @param ptr The pointer previously returned from \c CARB_MALLOC.
#define CARB_FREE(ptr) std::free(ptr)
#ifndef DOXYGEN_SHOULD_SKIP_THIS
# define __CARB_STRINGIFY(x) # x
#endif
//! Turns a name into a string, resolving macros (e.g. `CARB_STRINGIFY(__LINE__)` on line 815 will produce `"815"`).
//! @param x The name to turn into a string.
//! @returns \p x as a string.
#define CARB_STRINGIFY(x) __CARB_STRINGIFY(x)
//! FNV-1a 64-bit hash basis.
//! @see http://www.isthe.com/chongo/tech/comp/fnv/#FNV-param
constexpr uint64_t kFnvBasis = 14695981039346656037ull;
//! FNV-1a 64-bit hash prime.
//! @see http://www.isthe.com/chongo/tech/comp/fnv/#FNV-param
constexpr uint64_t kFnvPrime = 1099511628211ull;
//! Compile-time FNV-1a 64-bit hash, use with CARB_HASH_STRING macro
//! @param str The string to hash.
//! @param n The number of characters in \p str, not including the NUL terminator.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
constexpr uint64_t fnv1aHash(const char* str, std::size_t n, uint64_t hash = kFnvBasis)
{
return n > 0 ? fnv1aHash(str + 1, n - 1, (hash ^ *str) * kFnvPrime) : hash;
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//! Compile-time FNV-1a 64-bit hash for a static string (char array).
//! @param array The static string to hash.
//! @returns A hash computed from the given parameters.
template <std::size_t N>
constexpr uint64_t fnv1aHash(const char (&array)[N])
{
return fnv1aHash(&array[0], N - 1);
}
#endif
//! Runtime FNV-1a 64-bit string hash
//! @param str The C-style (NUL terminated) string to hash.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
inline uint64_t hashString(const char* str, uint64_t hash = kFnvBasis)
{
while (*str != '\0')
{
hash ^= static_cast<unsigned char>(*(str++));
hash *= kFnvPrime;
}
return hash;
}
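// Usage sketch: the compile-time and runtime forms agree for the same bytes, and a
// previous hash can be passed in to chain hashing across string fragments:
//
//     constexpr uint64_t kName = carb::fnv1aHash("transform"); // char-array overload
//     uint64_t h = carb::hashString("trans");
//     h = carb::hashString("form", h); // h == kName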
//! A fast table-based implementation of std::tolower for ASCII characters only.
//! @warning This function does not work on Unicode characters and is not locale-aware; it is ASCII only.
//! @param c The character to change to lower case.
//! @return The lower-case letter of \p c if \p c is an upper-case letter; \p c otherwise.
constexpr unsigned char tolower(unsigned char c)
{
return detail::lowerTable[c];
}
//! A fast table-based implementation of std::toupper for ASCII characters only.
//! @warning This function does not work on Unicode characters and is not locale-aware; it is ASCII only.
//! @param c The character to change to upper case.
//! @return The upper-case letter of \p c if \p c is a lower-case letter; \p c otherwise.
constexpr unsigned char toupper(unsigned char c)
{
return detail::upperTable[c];
}
//! Runtime FNV-1a 64-bit lower-case string hash (as if the string had been converted using \ref tolower()).
//! @param str The C-style (NUL terminated) string to hash.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
inline uint64_t hashLowercaseString(const char* str, uint64_t hash = kFnvBasis)
{
while (*str != '\0')
{
hash ^= tolower(static_cast<unsigned char>(*(str++)));
hash *= kFnvPrime;
}
return hash;
}
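// A minimal sketch (illustrative): the lower-case variants make it possible to
// compute case-insensitive hash keys, e.g. for header-style lookups.
//
//     uint64_t key = carb::hashLowercaseString("Content-Type");
//     // key == carb::hashLowercaseString("content-type")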
//! Runtime FNV-1a 64-bit lower-case byte hash (as if the bytes had been converted using \ref tolower()).
//! @param buffer The byte buffer to hash.
//! @param len The number of bytes in \p buffer.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
inline uint64_t hashLowercaseBuffer(const void* buffer, size_t len, uint64_t hash = kFnvBasis)
{
const unsigned char* data = static_cast<const unsigned char*>(buffer);
const unsigned char* const end = data + len;
while (data != end)
{
hash ^= tolower(*(data++));
hash *= kFnvPrime;
}
return hash;
}
//! Runtime FNV-1a 64-bit upper-case string hash (as if the string had been converted using \ref toupper()).
//! @param str The C-style (NUL terminated) string to hash.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
inline uint64_t hashUppercaseString(const char* str, uint64_t hash = kFnvBasis)
{
while (*str != '\0')
{
hash ^= toupper(static_cast<unsigned char>(*(str++)));
hash *= kFnvPrime;
}
return hash;
}
//! Runtime FNV-1a 64-bit upper-case byte hash (as if the bytes had been converted using \ref toupper()).
//! @param buffer The byte buffer to hash.
//! @param len The number of bytes in \p buffer.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
inline uint64_t hashUppercaseBuffer(const void* buffer, size_t len, uint64_t hash = kFnvBasis)
{
const unsigned char* data = static_cast<const unsigned char*>(buffer);
const unsigned char* const end = data + len;
while (data != end)
{
hash ^= toupper(*(data++));
hash *= kFnvPrime;
}
return hash;
}
//! Runtime FNV-1a 64-bit byte hash.
//! @param buffer The byte buffer to hash.
//! @param length The number of bytes in \p buffer.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
inline uint64_t hashBuffer(const void* buffer, size_t length, uint64_t hash = kFnvBasis)
{
const char* ptr = static_cast<const char*>(buffer);
for (size_t i = 0; i < length; ++i)
{
hash ^= static_cast<unsigned char>(ptr[i]);
hash *= kFnvPrime;
}
return hash;
}
//! Runtime FNV-1a 64-bit hash of a scalar type.
//! @param type A scalar value to hash.
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
template <class T>
constexpr uint64_t hashScalar(const T& type, uint64_t hash = kFnvBasis)
{
static_assert(std::is_scalar<T>::value, "Unsupported type for hashing");
return hashBuffer(reinterpret_cast<const char*>(std::addressof(type)), sizeof(type), hash);
}
/**
* Combines two hashes producing better collision avoidance than XOR.
*
* @param hash1 The initial hash
* @param hash2 The hash to combine with @p hash1
* @returns A combined hash of @p hash1 and @p hash2
*/
inline constexpr uint64_t hashCombine(uint64_t hash1, uint64_t hash2) noexcept
{
constexpr uint64_t kConstant{ 14313749767032793493ull };
constexpr int kRotate = 47;
hash2 *= kConstant;
hash2 ^= (hash2 >> kRotate);
hash2 *= kConstant;
hash1 ^= hash2;
hash1 *= kConstant;
// Add an arbitrary value to prevent 0 hashing to 0
hash1 += 0x42524143; // CARB
return hash1;
}
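// A minimal sketch (illustrative, not part of the original header): combining
// the hashes of the fields of a hypothetical struct into a single hash value.
//
//     struct Key { uint64_t id; const char* name; };
//     inline uint64_t hashKey(const Key& k)
//     {
//         return carb::hashCombine(carb::hashScalar(k.id), carb::hashString(k.name));
//     }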
// The string hash macro is guaranteed to evaluate at compile time. MSVC raises a warning for this, which we disable.
#if defined(__CUDACC__) || defined(DOXYGEN_BUILD)
//! Computes a literal string hash at compile time.
//! @param str The string literal to hash
//! @returns A hash computed from the given string literal as if by \ref carb::fnv1aHash().
# define CARB_HASH_STRING(str) std::integral_constant<uint64_t, carb::fnv1aHash(str)>::value
#else
# define CARB_HASH_STRING(str) \
CARB_IGNOREWARNING_MSC_WITH_PUSH(4307) /* 'operator': integral constant overflow */ \
std::integral_constant<uint64_t, carb::fnv1aHash(str)>::value CARB_IGNOREWARNING_MSC_POP
#endif
//! Syntactic sugar for `CARB_HASH_STRING(CARB_STRINGIFY(T))`.
#define CARB_HASH_TYPE(T) CARB_HASH_STRING(CARB_STRINGIFY(T))
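// A common usage pattern (illustrative sketch): because CARB_HASH_STRING is a
// compile-time constant, it can serve as a case label matched against a runtime
// hash computed with the same FNV-1a algorithm.
//
//     switch (carb::hashString(name))
//     {
//         case CARB_HASH_STRING("width"): /* handle width */ break;
//         case CARB_HASH_STRING("height"): /* handle height */ break;
//     }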
// printf-like functions attributes
#if CARB_COMPILER_GNUC || defined(DOXYGEN_BUILD)
//! Requests that the compiler validate any variadic arguments as printf-style format specifiers, if supported by the
//! compiler. Causes a compilation error if the printf-style format specifier doesn't match the given variadic types.
//! @note The current implementation is effective only when `CARB_COMPILER_GNUC` is non-zero. The Windows implementation
//! does not work properly for custom printf-like function pointers. It is recommended where possible to use a "fake
//! printf" trick to force the compiler to evaluate the arguments:
//! ```cpp
//! if (0) printf(fmt, arg1, arg2); // Compiler will check but never execute.
//! ```
//! @param fmt_ordinal The 1-based function parameter receiving the printf-style format string.
//! @param args_ordinal The 1-based function parameter receiving the first variadic argument.
# define CARB_PRINTF_FUNCTION(fmt_ordinal, args_ordinal) CARB_ATTRIBUTE(format(printf, fmt_ordinal, args_ordinal))
#elif CARB_COMPILER_MSC
// Microsoft suggest to use SAL annotations _Printf_format_string_ and _Printf_format_string_params_ for
// printf-like functions. Unfortunately it does not work properly for custom printf-like function pointers.
// So, instead of defining marker attribute for format string, we use the "fake printf" trick to force compiler
// checks and keep function attribute empty.
# define CARB_PRINTF_FUNCTION(fmt_ordinal, args_ordinal)
#else
# define CARB_PRINTF_FUNCTION(fmt_ordinal, args_ordinal)
#endif
//! An empty class tag type used with \ref EmptyMemberPair constructors.
struct ValueInitFirst
{
//! Default constructor.
constexpr explicit ValueInitFirst() = default;
};
//! An empty class tag type used with \ref EmptyMemberPair constructors.
struct InitBoth
{
//! Default constructor.
constexpr explicit InitBoth() = default;
};
//! Attempts to invoke the Empty Member Optimization by inheriting from the First element if possible, which, if empty
//! will eliminate the storage necessary for an empty class; the Second element is always stored as a separate member.
//! The First element is inherited from if it is an empty `class`/`struct` and is not declared `final`.
//! @tparam First The first element of the pair that the pair will inherit from if empty and not `final`.
//! @tparam Second The second element of the pair that will always be a member.
template <class First, class Second, bool = std::is_empty<First>::value && !std::is_final<First>::value>
class EmptyMemberPair : private First
{
public:
//! Type of the First element
using FirstType = First;
//! Type of the Second element
using SecondType = Second;
//! Constructor that value-initializes the `First` member and passes all arguments to the constructor of `Second`.
//! @param args arguments passed to the constructor of `second`.
template <class... Args2>
constexpr explicit EmptyMemberPair(ValueInitFirst, Args2&&... args)
: First{}, second{ std::forward<Args2>(args)... }
{
}
//! Constructor that initializes both members.
//! @param arg1 the argument that is forwarded to the `First` constructor.
//! @param args2 arguments passed to the constructor of `second`.
template <class Arg1, class... Args2>
constexpr explicit EmptyMemberPair(InitBoth, Arg1&& arg1, Args2&&... args2)
: First(std::forward<Arg1>(arg1)), second(std::forward<Args2>(args2)...)
{
}
//! Non-const access to `First`.
//! @returns a non-const reference to `First`.
constexpr FirstType& first() noexcept
{
return *this;
}
//! Const access to `First`.
//! @returns a const reference to `First`.
constexpr const FirstType& first() const noexcept
{
return *this;
}
//! Direct access to the `Second` member.
SecondType second;
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template <class First, class Second>
class EmptyMemberPair<First, Second, false>
{
public:
using FirstType = First;
using SecondType = Second;
template <class... Args2>
constexpr explicit EmptyMemberPair(ValueInitFirst, Args2&&... args)
: m_first(), second(std::forward<Args2>(args)...)
{
}
template <class Arg1, class... Args2>
constexpr explicit EmptyMemberPair(InitBoth, Arg1&& arg1, Args2&&... args2)
: m_first(std::forward<Arg1>(arg1)), second(std::forward<Args2>(args2)...)
{
}
constexpr FirstType& first() noexcept
{
return m_first;
}
constexpr const FirstType& first() const noexcept
{
return m_first;
}
private:
FirstType m_first;
public:
SecondType second;
};
#endif
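// A minimal sketch (illustrative, not part of the original header): a
// hypothetical owning pointer that stores a (typically empty) deleter alongside
// the pointer without paying storage for the deleter when it is an empty class.
// Requires <memory> for std::default_delete.
//
//     template <class T, class Deleter = std::default_delete<T>>
//     struct OwningPtr
//     {
//         carb::EmptyMemberPair<Deleter, T*> pair{ carb::ValueInitFirst{}, nullptr };
//         ~OwningPtr() { pair.first()(pair.second); }
//     };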
} // namespace carb
/**
* Picks the minimum of two values.
*
* Same as `std::min` but implemented without using the `min` keyword as Windows.h can sometimes `#define` it.
*
* @param left The first value to compare.
* @param right The second value to compare.
* @returns \p left if \p left is less than \p right, otherwise \p right, even if the values are equal.
*/
template <class T>
CARB_NODISCARD constexpr const T& carb_min(const T& left, const T& right) noexcept(noexcept(left < right))
{
return left < right ? left : right;
}
/**
* Picks the maximum of two values.
*
 * Same as `std::max` but implemented without using the `max` keyword as Windows.h can sometimes `#define` it.
*
* @param left The first value to compare.
* @param right The second value to compare.
* @returns \p right if \p left is less than \p right, otherwise \p left, even if the values are equal.
*/
template <class T>
CARB_NODISCARD constexpr const T& carb_max(const T& left, const T& right) noexcept(noexcept(left < right))
{
return left < right ? right : left;
}
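// A minimal sketch (illustrative): carb_min/carb_max remain usable even in
// translation units where Windows.h has defined min/max as macros.
//
//     int clampDim(int v) { return carb_max(0, carb_min(v, 4096)); }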
#if CARB_POSIX || defined(DOXYGEN_BUILD)
/**
* A macro to retry operations if they return -1 and errno is set to EINTR.
* @warning The `op` expression is potentially evaluated multiple times.
* @param op The operation to retry
* @returns The return value of \p op while guaranteeing that `errno` is not `EINTR`.
*/
# define CARB_RETRY_EINTR(op) \
[&] { \
decltype(op) ret_; \
while ((ret_ = (op)) < 0 && errno == EINTR) \
{ \
} \
return ret_; \
}()
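// A minimal sketch (illustrative, POSIX-only; the helper below is hypothetical
// and requires <unistd.h>, which this header does not include): retrying a
// read() that may be interrupted by a signal.
//
//     ssize_t readRetrying(int fd, void* buf, size_t len)
//     {
//         return CARB_RETRY_EINTR(::read(fd, buf, len));
//     }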
#endif
/**
* Portable way to mark unused variables as used.
*
* This tricks the compiler into thinking that the variables are used, eliminating warnings about unused variables.
*
* @param args Any variables or arguments that should be marked as unused.
*/
template <class... Args>
void CARB_UNUSED(Args&&... CARB_DOC_ONLY(args))
{
}
/** A macro to mark functionality that has not been implemented yet.
* @remarks This will abort the process with a message.
* The macro is [[noreturn]].
*/
#define CARB_UNIMPLEMENTED(msg, ...) \
do \
{ \
CARB_FATAL_UNLESS(false, (msg), ##__VA_ARGS__); \
std::terminate(); \
} while (0)
/** A macro to mark placeholder functions on MacOS while the porting effort is in progress. */
#define CARB_MACOS_UNIMPLEMENTED() CARB_UNIMPLEMENTED("Unimplemented on Mac OS")
#if defined(CARB_INCLUDE_PURIFY_NAME) && !defined(DOXYGEN_BUILD)
# ifdef __COUNTER__
# define CARB_INCLUDE_PURIFY_TEST(...) \
inline void CARB_JOIN(CARB_INCLUDE_PURIFY_NAME, __COUNTER__)() \
__VA_ARGS__ static_assert(true, "Semicolon required")
# else
# define CARB_INCLUDE_PURIFY_TEST(...) \
inline void CARB_JOIN(CARB_INCLUDE_PURIFY_NAME, __LINE__)() \
__VA_ARGS__ static_assert(true, "Semicolon required")
# endif
#else
/**
* A macro that is used only for public includes to define a function which will instantiate templates.
*
* The templates are instantiated to make sure that all of the required symbols are available for compilation.
* Example usage:
* @code{.cpp}
* CARB_INCLUDE_PURIFY_TEST({
* carb::Delegate<void()> del;
* });
* @endcode
* @note The braces must be specified inside the macro parentheses.
* @note This function is never executed, merely compiled to test include purification. Unit tests are responsible for
* full testing.
* @note This macro only produces a function if `CARB_INCLUDE_PURIFY_NAME` is set to a function name. This happens in
* Carbonite's premake5.lua file when the include purification projects are generated.
*/
# define CARB_INCLUDE_PURIFY_TEST(...)
#endif
| 78,032 |
C
| 46.726605 | 136 | 0.649413 |
omniverse-code/kit/include/carb/StartupUtils.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Contains @ref carb::startupFramework() and @ref carb::shutdownFramework(). Consider using @ref
//! OMNI_CORE_INIT(), which invokes these methods for you in a safe manner.
#pragma once
#include "Framework.h"
#include "crashreporter/CrashReporterUtils.h"
#include "dictionary/DictionaryUtils.h"
#include "extras/AppConfig.h"
#include "extras/CmdLineParser.h"
#include "extras/EnvironmentVariableParser.h"
#include "extras/EnvironmentVariableUtils.h"
#include "extras/Path.h"
#include "extras/VariableSetup.h"
#include "filesystem/IFileSystem.h"
#include "l10n/L10nUtils.h"
#include "logging/Log.h"
#include "logging/LoggingSettingsUtils.h"
#include "logging/StandardLogger.h"
#include "profiler/Profile.h"
#include "settings/ISettings.h"
#include "tokens/ITokens.h"
#include "tokens/TokensUtils.h"
#include "../omni/structuredlog/StructuredLogSettingsUtils.h"
#include <array>
#include <map>
#include <string>
#include <vector>
namespace carb
{
//! Parameters passed to @ref carb::startupFramework().
struct StartupFrameworkDesc
{
//! A string containing one of two things:
//!
//! * A path to a configuration file.
//!
//! * A raw string containing the configuration (in either JSON or TOML format, based on @p configFormat).
//!
//! @ref carb::startupFramework() will first check to see if the string maps to an existing file, and if not, the
//! string is treated as a raw configuration string.
const char* configString; // Path to a config file or string with configuration data
char** argv; //!< Array of command line arguments
int argc; //!< Number of command line arguments
//! An array of search paths for plugins.
//!
//! Relative search paths are relative to the executable's directory, not the current working directory.
//!
//! These search paths will be used when loading the base set of carbonite plugins (such as carb.settings.plugin),
//! then this will be set as the default value for the @ref carb::settings::ISettings key `/pluginSearchPaths` (this
//! allows the setting to be overridden if you set it in config.toml or pass it on the command line).
//!
//! Passing an empty array will result in the executable directory being used as the default search path.
//!
//! This option is needed when the base set of Carbonite plugins are not inside of the executable's directory;
//! otherwise, `/pluginSearchPaths` could be set in config.toml or via the command line.
//!
//! Defaults to `nullptr`.
const char* const* initialPluginsSearchPaths;
size_t initialPluginsSearchPathCount; //!< Size of array of paths to search for plugins
//! Prefix of command line arguments serving as overrides for configuration values. Default is `--/`.
const char* cmdLineParamPrefix;
//! Prefix of environment variables serving as overrides for configuration values. Default is `OMNI_APPNAME_`.
const char* envVarsParamPrefix;
const char* configFormat; //!< The selected config format ("toml", "json", etc). Default is "toml".
const char* appNameOverride; //!< Override automatic app name search. Defaults to `nullptr`.
const char* appPathOverride; //!< Override automatic app path search. Defaults to `nullptr`.
bool disableCrashReporter; //!< If `true`, the crash reporter plugin will not be loaded. Defaults to `false`.
//! Returns a @ref StartupFrameworkDesc with default values.
static StartupFrameworkDesc getDefault()
{
static constexpr const char* kDefaultCmdLineParamPrefix = "--/";
static constexpr const char* kDefaultEnvVarsParamPrefix = "OMNI_APPNAME_";
static constexpr const char* kDefaultConfigFormat = "toml";
StartupFrameworkDesc result{};
result.cmdLineParamPrefix = kDefaultCmdLineParamPrefix;
result.envVarsParamPrefix = kDefaultEnvVarsParamPrefix;
result.configFormat = kDefaultConfigFormat;
return result;
}
};
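// A minimal sketch (illustrative): configString accepts either a path or raw
// configuration text. Here a raw TOML fragment sets a key that is also used
// elsewhere in this header; the value chosen is arbitrary.
//
//     carb::StartupFrameworkDesc desc = carb::StartupFrameworkDesc::getDefault();
//     desc.configString = "[app]\nuploadDumpsOnStartup = false\n"; // raw TOML, not a file path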
/**
* Simple plugin loading function wrapper that loads plugins matching multiple patterns.
*
* Consider using @ref carb::startupFramework(), which calls this function with user defined paths via config files, the
* environment, and the command line.
*
* @param pluginNamePatterns String that contains plugin names pattern - wildcards are supported.
* @param pluginNamePatternCount Number of items in @p pluginNamePatterns.
* @param searchPaths Array of paths to look for plugins in.
* @param searchPathCount Number of paths in searchPaths array.
*/
inline void loadPluginsFromPatterns(const char* const* pluginNamePatterns,
size_t pluginNamePatternCount,
const char* const* searchPaths = nullptr,
size_t searchPathCount = 0)
{
Framework* f = getFramework();
PluginLoadingDesc desc = PluginLoadingDesc::getDefault();
desc.loadedFileWildcards = pluginNamePatterns;
desc.loadedFileWildcardCount = pluginNamePatternCount;
desc.searchPaths = searchPaths;
desc.searchPathCount = searchPathCount;
f->loadPlugins(desc);
}
/**
* Simple plugin loading function wrapper that loads plugins matching a single pattern.
*
* Consider using @ref carb::startupFramework(), which calls this function with user defined paths via config files, the
* environment, and the command line.
*
* @param pluginNamePattern String that contains a plugin pattern - wildcards are supported.
* @param searchPaths Array of paths to look for plugins in.
* @param searchPathCount Number of paths in searchPaths array.
*/
inline void loadPluginsFromPattern(const char* pluginNamePattern,
const char* const* searchPaths = nullptr,
size_t searchPathCount = 0)
{
const char* plugins[] = { pluginNamePattern };
loadPluginsFromPatterns(plugins, countOf(plugins), searchPaths, searchPathCount);
}
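// A minimal sketch (illustrative; the wildcard is an example): loads every
// plugin whose file name matches the pattern from the executable directory,
// which is the default search path.
//
//     carb::loadPluginsFromPattern("carb.dictionary.*");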
//! Internal
namespace detail
{
//! Loads plugins based on settings specified in the given @p settings object.
//!
//! The settings read populate a @ref carb::PluginLoadingDesc. The settings read are:
//!
//! @rst
//!
//! /pluginSearchPaths
//! Array of paths in which to search for plugins.
//!
//! /pluginSearchRecursive
//!     If ``true``, search each path in `/pluginSearchPaths` recursively.
//!
//! /reloadablePlugins
//! Array of plugin wildcards that mark plugins as reloadable.
//!
//! /pluginsLoaded
//!     Array of plugin wildcards to load.
//!
//! /pluginsExcluded
//!     Array of plugin wildcards that match `/pluginsLoaded` but should not be loaded.
//!
//! @endrst
//!
//! Do not use this function directly. Rather, call @ref carb::startupFramework().
inline void loadPluginsFromConfig(settings::ISettings* settings)
{
if (settings == nullptr)
return;
Framework* f = getFramework();
// Initialize the plugin loading description to default configuration,
// and override parts of it to the config values, if present.
PluginLoadingDesc loadingDesc = PluginLoadingDesc::getDefault();
// Check if plugin search paths are present in the config, and override if present
const char* kPluginSearchPathsKey = "/pluginSearchPaths";
std::vector<const char*> pluginSearchPaths(settings->getArrayLength(kPluginSearchPathsKey));
if (!pluginSearchPaths.empty())
{
settings->getStringBufferArray(kPluginSearchPathsKey, pluginSearchPaths.data(), pluginSearchPaths.size());
loadingDesc.searchPaths = pluginSearchPaths.data();
loadingDesc.searchPathCount = pluginSearchPaths.size();
}
const char* kPluginSearchRecursive = "/pluginSearchRecursive";
// Is search recursive?
if (settings->isAccessibleAs(carb::dictionary::ItemType::eBool, kPluginSearchRecursive))
{
loadingDesc.searchRecursive = settings->getAsBool(kPluginSearchRecursive);
}
// Check/override reloadable plugins if present
const char* kReloadablePluginsKey = "/reloadablePlugins";
std::vector<const char*> reloadablePluginFiles(settings->getArrayLength(kReloadablePluginsKey));
if (!reloadablePluginFiles.empty())
{
settings->getStringBufferArray(kReloadablePluginsKey, reloadablePluginFiles.data(), reloadablePluginFiles.size());
loadingDesc.reloadableFileWildcards = reloadablePluginFiles.data();
loadingDesc.reloadableFileWildcardCount = reloadablePluginFiles.size();
}
// Check/override plugins to load if present
const char* kPluginsLoadedKey = "/pluginsLoaded";
std::vector<const char*> pluginsLoaded;
if (settings->getItemType(kPluginsLoadedKey) == dictionary::ItemType::eDictionary)
{
pluginsLoaded.resize(settings->getArrayLength(kPluginsLoadedKey));
settings->getStringBufferArray(kPluginsLoadedKey, pluginsLoaded.data(), pluginsLoaded.size());
loadingDesc.loadedFileWildcards = pluginsLoaded.size() ? pluginsLoaded.data() : nullptr;
loadingDesc.loadedFileWildcardCount = pluginsLoaded.size();
}
const char* kPluginsExcludedKey = "/pluginsExcluded";
std::vector<const char*> pluginsExcluded;
if (settings->getItemType(kPluginsExcludedKey) == dictionary::ItemType::eDictionary)
{
pluginsExcluded.resize(settings->getArrayLength(kPluginsExcludedKey));
settings->getStringBufferArray(kPluginsExcludedKey, pluginsExcluded.data(), pluginsExcluded.size());
loadingDesc.excludedFileWildcards = pluginsExcluded.size() ? pluginsExcluded.data() : nullptr;
loadingDesc.excludedFileWildcardCount = pluginsExcluded.size();
}
// Load plugins based on the resulting desc
if (loadingDesc.loadedFileWildcardCount)
f->loadPlugins(loadingDesc);
}
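// An illustrative TOML fragment (hypothetical values) showing where the keys
// read above would typically come from:
//
//     pluginSearchPaths = ["plugins"]
//     pluginSearchRecursive = false
//     pluginsLoaded = ["carb.*.plugin"]
//     pluginsExcluded = ["carb.profiler-*.plugin"]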
//! Sets @ref carb::Framework's "default" plugins from the given @p settings `/defaultPlugins` key.
//!
//! In short, this function calls @ref carb::Framework::setDefaultPlugin for each plugin name in `/defaultPlugins`.
//! However, since the interface type cannot be specified, plugins listed in `/defaultPlugins` will become the default
//! plugin for \a all interfaces they provide.
//!
//! This function assumes the plugins in `/defaultPlugins` have already been loaded.
//!
//! The following keys are used from @p settings:
//!
//! @rst
//! /defaultPlugins
//!     A list of plugin names. These plugins become the default plugins to use when acquiring their interfaces.
//! @endrst
//!
//! Do not use this function directly. Rather, call @ref carb::startupFramework().
inline void setDefaultPluginsFromConfig(settings::ISettings* settings)
{
if (settings == nullptr)
return;
Framework* f = getFramework();
// Default plugins
const char* kDefaultPluginsKey = "/defaultPlugins";
std::vector<const char*> defaultPlugins(settings->getArrayLength(kDefaultPluginsKey));
if (!defaultPlugins.empty())
{
settings->getStringBufferArray(kDefaultPluginsKey, defaultPlugins.data(), defaultPlugins.size());
for (const char* pluginName : defaultPlugins)
{
// Set plugin as default for all interfaces it provides
const PluginDesc& pluginDesc = f->getPluginDesc(pluginName);
for (size_t i = 0; i < pluginDesc.interfaceCount; i++)
{
f->setDefaultPluginEx(g_carbClientName, pluginDesc.interfaces[i], pluginName);
}
}
}
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// If the dict item is a special raw string, this returns a pointer to the buffer past the special raw string marker.
// In all other cases it returns nullptr.
inline const char* getRawStringFromItem(carb::dictionary::IDictionary* dictInterface, const carb::dictionary::Item* item)
{
if (!dictInterface || !item)
{
return nullptr;
}
if (dictInterface->getItemType(item) != dictionary::ItemType::eString)
{
return nullptr;
}
const char* stringBuffer = dictInterface->getStringBuffer(item);
if (!stringBuffer)
{
return nullptr;
}
constexpr char kSpecialRawStringMarker[] = "$raw:";
constexpr size_t kMarkerLen = carb::countOf(kSpecialRawStringMarker) - 1;
if (std::strncmp(stringBuffer, kSpecialRawStringMarker, kMarkerLen) != 0)
{
return nullptr;
}
return stringBuffer + kMarkerLen;
}
class LoadSettingsHelper
{
public:
struct SupportedConfigInfo
{
const char* configFormatName;
const char* serializerPluginName;
const char* configExt;
};
LoadSettingsHelper()
{
Framework* f = getFramework();
m_fs = f->acquireInterface<filesystem::IFileSystem>();
}
struct LoadSettingsDesc
{
std::string appDir; // Application directory
std::string appName; // Application name
const char* configStringOrPath; // Configuration string; may be null, a string containing configuration data
// (in the selected configFormat), or a path to a config file
const extras::ConfigLoadHelper::CmdLineOptionsMap* cmdLineOptionsMap; // Mapping of the command line options
const extras::ConfigLoadHelper::PathwiseEnvOverridesMap* pathwiseEnvOverridesMap; // Mapping of path-wise
// environment variables that
// will be mapped into
// corresponding settings
const extras::ConfigLoadHelper::EnvVariablesMap* envVariablesMap; // Mapping of common environment variables
const char* const* pluginSearchPaths; // Array of directories used by the system to search for plugins
size_t pluginSearchPathCount; // Number of elements in the pluginSearchPaths
const char* cmdLineConfigPath; // Path to a file containing config override (in selected configFormat), can be
// null
const char* configFormat; // Selected configuration format that is supported by the system
inline static LoadSettingsDesc getDefault() noexcept
{
LoadSettingsDesc result{};
Framework* f = getFramework();
filesystem::IFileSystem* fs = f->acquireInterface<filesystem::IFileSystem>();
extras::Path execPathStem(extras::getPathStem(fs->getExecutablePath()));
// Initialize application path and name to the executable path and name
result.appName = execPathStem.getFilename();
result.appDir = execPathStem.getParent();
result.configFormat = "toml";
return result;
}
inline void overwriteWithNonEmptyParams(const LoadSettingsDesc& other) noexcept
{
if (!other.appDir.empty())
{
appDir = other.appDir;
}
if (!other.appName.empty())
{
appName = other.appName;
}
if (other.configStringOrPath)
{
configStringOrPath = other.configStringOrPath;
}
if (other.cmdLineOptionsMap)
{
cmdLineOptionsMap = other.cmdLineOptionsMap;
}
if (other.pathwiseEnvOverridesMap)
{
pathwiseEnvOverridesMap = other.pathwiseEnvOverridesMap;
}
if (other.envVariablesMap)
{
envVariablesMap = other.envVariablesMap;
}
if (other.pluginSearchPaths)
{
pluginSearchPaths = other.pluginSearchPaths;
pluginSearchPathCount = other.pluginSearchPathCount;
}
if (other.cmdLineConfigPath)
{
cmdLineConfigPath = other.cmdLineConfigPath;
}
if (other.configFormat)
{
configFormat = other.configFormat;
}
}
};
void loadBaseSettingsPlugins(const char* const* pluginSearchPaths, size_t pluginSearchPathCount)
{
Framework* f = getFramework();
// clang-format off
const char* plugins[] = {
"carb.dictionary.plugin",
"carb.settings.plugin",
"carb.tokens.plugin",
m_selectedConfigInfo ? m_selectedConfigInfo->serializerPluginName : "carb.dictionary.serializer-toml.plugin"
};
// clang-format on
loadPluginsFromPatterns(plugins, countOf(plugins), pluginSearchPaths, pluginSearchPathCount);
m_idict = f->tryAcquireInterface<dictionary::IDictionary>();
if (m_idict == nullptr)
{
CARB_LOG_INFO("Couldn't acquire dictionary::IDictionary interface on startup to load the settings.");
return;
}
m_settings = f->tryAcquireInterface<settings::ISettings>();
if (m_settings == nullptr)
{
CARB_LOG_INFO("Couldn't acquire settings::ISettings interface on startup to load the settings.");
}
}
class ConfigStageLoader
{
public:
static constexpr const char* kConfigSuffix = ".config";
static constexpr const char* kOverrideSuffix = ".override";
ConfigStageLoader(filesystem::IFileSystem* fs,
dictionary::ISerializer* configSerializer,
LoadSettingsHelper* helper,
const SupportedConfigInfo* selectedConfigInfo,
const extras::ConfigLoadHelper::EnvVariablesMap* envVariablesMap)
: m_fs(fs),
m_configSerializer(configSerializer),
m_helper(helper),
m_selectedConfigInfo(selectedConfigInfo),
m_envVariablesMap(envVariablesMap)
{
m_possibleConfigPathsStorage.reserve(4);
}
dictionary::Item* loadAndMergeSharedUserSpaceConfig(const extras::Path& userFolder,
dictionary::Item* combinedConfig,
std::string* sharedUserSpaceFilepath)
{
if (!userFolder.isEmpty())
{
m_possibleConfigPathsStorage.clear();
m_possibleConfigPathsStorage.emplace_back(userFolder / "omni" + kConfigSuffix +
m_selectedConfigInfo->configExt);
return tryLoadAnySettingsAndMergeIntoTarget(m_configSerializer, combinedConfig,
m_possibleConfigPathsStorage, m_envVariablesMap,
sharedUserSpaceFilepath);
}
return combinedConfig;
}
dictionary::Item* loadAndMergeAppSpecificUserSpaceConfig(const extras::Path& userFolder,
const std::string& appName,
dictionary::Item* combinedConfig,
std::string* appSpecificUserSpaceFilepath)
{
if (!userFolder.isEmpty())
{
m_possibleConfigPathsStorage.clear();
m_possibleConfigPathsStorage.emplace_back(userFolder / appName + kConfigSuffix +
m_selectedConfigInfo->configExt);
return tryLoadAnySettingsAndMergeIntoTarget(m_configSerializer, combinedConfig,
m_possibleConfigPathsStorage, m_envVariablesMap,
appSpecificUserSpaceFilepath);
}
return combinedConfig;
}
dictionary::Item* loadAndMergeLocalSpaceConfig(const std::string& appDir,
const std::string& appName,
dictionary::Item* combinedConfig,
std::string* localSpaceConfigFilepath)
{
const extras::Path cwd(m_fs->getCurrentDirectoryPath());
const extras::Path appDirPath(appDir);
const extras::Path exePath(m_fs->getExecutableDirectoryPath());
const std::string appConfig = appName + kConfigSuffix + m_selectedConfigInfo->configExt;
m_possibleConfigPathsStorage.clear();
m_possibleConfigPathsStorage.emplace_back(cwd / appConfig);
if (!appDir.empty())
{
m_possibleConfigPathsStorage.emplace_back(appDirPath / appConfig);
}
if (appDirPath != exePath)
{
m_possibleConfigPathsStorage.emplace_back(exePath / appConfig);
}
return tryLoadAnySettingsAndMergeIntoTarget(m_configSerializer, combinedConfig, m_possibleConfigPathsStorage,
m_envVariablesMap, localSpaceConfigFilepath);
}
dictionary::Item* loadAndMergeSharedUserSpaceConfigOverride(dictionary::Item* combinedConfig,
const std::string& sharedUserSpaceFilepath)
{
if (!sharedUserSpaceFilepath.empty())
{
m_possibleConfigPathsStorage.clear();
addPossiblePathOverridesForSearch(
extras::getPathStem(sharedUserSpaceFilepath), m_selectedConfigInfo->configExt);
return tryLoadAnySettingsAndMergeIntoTarget(
m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr);
}
return combinedConfig;
}
dictionary::Item* loadAndMergeAppSpecificUserSpaceConfigOverride(dictionary::Item* combinedConfig,
const std::string& appSpecificUserSpaceFilepath)
{
if (!appSpecificUserSpaceFilepath.empty())
{
m_possibleConfigPathsStorage.clear();
addPossiblePathOverridesForSearch(
extras::getPathStem(appSpecificUserSpaceFilepath), m_selectedConfigInfo->configExt);
return tryLoadAnySettingsAndMergeIntoTarget(
m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr);
}
return combinedConfig;
}
dictionary::Item* loadAndMergeLocalSpaceConfigOverride(dictionary::Item* combinedConfig,
const std::string& localSpaceConfigFilepath)
{
if (!localSpaceConfigFilepath.empty())
{
m_possibleConfigPathsStorage.clear();
addPossiblePathOverridesForSearch(
extras::getPathStem(localSpaceConfigFilepath), m_selectedConfigInfo->configExt);
return tryLoadAnySettingsAndMergeIntoTarget(
m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr);
}
return combinedConfig;
}
dictionary::Item* loadAndMergeCustomConfig(dictionary::Item* combinedConfig,
const char* filepath,
dictionary::ISerializer* customSerializer = nullptr)
{
m_possibleConfigPathsStorage.clear();
m_possibleConfigPathsStorage.emplace_back(filepath);
dictionary::ISerializer* configSerializer = customSerializer ? customSerializer : m_configSerializer;
return tryLoadAnySettingsAndMergeIntoTarget(
configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr);
}
private:
void addPossiblePathOverridesForSearch(const std::string& pathStem, const char* extension)
{
m_possibleConfigPathsStorage.emplace_back(pathStem + kOverrideSuffix + extension);
m_possibleConfigPathsStorage.emplace_back(pathStem + extension + kOverrideSuffix);
}
dictionary::Item* tryLoadAnySettingsAndMergeIntoTarget(dictionary::ISerializer* configSerializer,
dictionary::Item* targetDict,
const std::vector<std::string>& possibleConfigPaths,
const extras::ConfigLoadHelper::EnvVariablesMap* envVariablesMap,
std::string* loadedDictPath)
{
if (loadedDictPath)
{
loadedDictPath->clear();
}
dictionary::Item* loadedDict = nullptr;
for (const auto& curConfigPath : possibleConfigPaths)
{
const char* dictFilename = curConfigPath.c_str();
if (!m_fs->exists(dictFilename))
{
continue;
}
loadedDict = dictionary::createDictionaryFromFile(configSerializer, dictFilename);
if (loadedDict)
{
if (loadedDictPath)
{
*loadedDictPath = dictFilename;
}
CARB_LOG_INFO("Found and loaded settings from: %s", dictFilename);
break;
}
else
{
CARB_LOG_ERROR("Couldn't load the '%s' config data from file '%s'",
m_selectedConfigInfo->configFormatName, dictFilename);
break;
}
}
dictionary::IDictionary* dictionaryInterface = m_helper->getDictionaryInterface();
return extras::ConfigLoadHelper::resolveAndMergeNewDictIntoTarget(
dictionaryInterface, targetDict, loadedDict, loadedDictPath ? loadedDictPath->c_str() : nullptr,
envVariablesMap);
}
std::vector<std::string> m_possibleConfigPathsStorage;
filesystem::IFileSystem* m_fs = nullptr;
dictionary::ISerializer* m_configSerializer = nullptr;
LoadSettingsHelper* m_helper = nullptr;
const SupportedConfigInfo* m_selectedConfigInfo = nullptr;
const extras::ConfigLoadHelper::EnvVariablesMap* m_envVariablesMap = nullptr;
};
inline dictionary::ISerializer* acquireOrLoadSerializerFromConfigInfo(const LoadSettingsDesc& params,
const SupportedConfigInfo* configInfo)
{
dictionary::ISerializer* configSerializer =
getFramework()->tryAcquireInterface<dictionary::ISerializer>(configInfo->serializerPluginName);
if (configSerializer)
return configSerializer;
return loadConfigSerializerPlugin(params.pluginSearchPaths, params.pluginSearchPathCount, configInfo);
}
inline dictionary::Item* readConfigStages(const LoadSettingsDesc& params,
std::string* localSpaceConfigFilepath,
std::string* customConfigFilepath,
std::string* cmdLineConfigFilepath)
{
if (!m_configSerializer)
{
return nullptr;
}
CARB_LOG_INFO("Using '%s' format for config files.", m_selectedConfigInfo->configFormatName);
dictionary::Item* combinedConfig = nullptr;
extras::Path userFolder = extras::ConfigLoadHelper::getConfigUserFolder(params.envVariablesMap);
std::string sharedUserSpaceFilepath;
std::string appSpecificUserSpaceFilepath;
ConfigStageLoader configStageLoader(m_fs, m_configSerializer, this, m_selectedConfigInfo, params.envVariablesMap);
// Base configs
combinedConfig =
configStageLoader.loadAndMergeSharedUserSpaceConfig(userFolder, combinedConfig, &sharedUserSpaceFilepath);
combinedConfig = configStageLoader.loadAndMergeAppSpecificUserSpaceConfig(
userFolder, params.appName, combinedConfig, &appSpecificUserSpaceFilepath);
combinedConfig = configStageLoader.loadAndMergeLocalSpaceConfig(
params.appDir, params.appName, combinedConfig, localSpaceConfigFilepath);
// Overrides
combinedConfig =
configStageLoader.loadAndMergeSharedUserSpaceConfigOverride(combinedConfig, sharedUserSpaceFilepath);
combinedConfig = configStageLoader.loadAndMergeAppSpecificUserSpaceConfigOverride(
combinedConfig, appSpecificUserSpaceFilepath);
combinedConfig =
configStageLoader.loadAndMergeLocalSpaceConfigOverride(combinedConfig, *localSpaceConfigFilepath);
tokens::ITokens* tokensInterface = carb::getFramework()->tryAcquireInterface<tokens::ITokens>();
// Loading text configuration override
if (params.configStringOrPath)
{
std::string configPath;
if (tokensInterface)
{
configPath = tokens::resolveString(tokensInterface, params.configStringOrPath);
}
else
{
configPath = params.configStringOrPath;
}
if (m_fs->exists(configPath.c_str()))
{
std::string configExt = extras::Path(configPath).getExtension();
const SupportedConfigInfo* configInfo = getConfigInfoFromExtension(configExt.c_str());
dictionary::ISerializer* customSerializer = acquireOrLoadSerializerFromConfigInfo(params, configInfo);
if (customConfigFilepath)
*customConfigFilepath = configPath;
combinedConfig =
configStageLoader.loadAndMergeCustomConfig(combinedConfig, configPath.c_str(), customSerializer);
}
else
{
dictionary::Item* textConfigurationOverride =
m_configSerializer->createDictionaryFromStringBuffer(params.configStringOrPath);
if (textConfigurationOverride)
{
CARB_LOG_INFO("Loaded text configuration override");
combinedConfig = extras::ConfigLoadHelper::resolveAndMergeNewDictIntoTarget(
m_idict, combinedConfig, textConfigurationOverride, "text configuration override",
params.envVariablesMap);
}
else
{
CARB_LOG_ERROR("Couldn't process provided config string as a '%s' config file or config data",
m_selectedConfigInfo->configFormatName);
}
}
}
// Loading custom file configuration override
if (params.cmdLineConfigPath)
{
std::string configPath;
if (tokensInterface)
{
configPath = tokens::resolveString(tokensInterface, params.cmdLineConfigPath);
}
else
{
configPath = params.cmdLineConfigPath;
}
if (m_fs->exists(configPath.c_str()))
{
std::string configExt = extras::Path(configPath).getExtension();
const SupportedConfigInfo* configInfo = getConfigInfoFromExtension(configExt.c_str());
dictionary::ISerializer* customSerializer = acquireOrLoadSerializerFromConfigInfo(params, configInfo);
if (cmdLineConfigFilepath)
*cmdLineConfigFilepath = params.cmdLineConfigPath;
combinedConfig =
configStageLoader.loadAndMergeCustomConfig(combinedConfig, configPath.c_str(), customSerializer);
}
else
{
CARB_LOG_ERROR("The config file '%s' provided via command line doesn't exist", params.cmdLineConfigPath);
}
}
combinedConfig = extras::ConfigLoadHelper::applyPathwiseEnvOverrides(
m_idict, combinedConfig, params.pathwiseEnvOverridesMap, params.envVariablesMap);
combinedConfig = extras::ConfigLoadHelper::applyCmdLineOverrides(
m_idict, combinedConfig, params.cmdLineOptionsMap, params.envVariablesMap);
return combinedConfig;
}
const auto& getSupportedConfigTypes()
{
static const std::array<SupportedConfigInfo, 2> kSupportedConfigTypes = {
{ { "toml", "carb.dictionary.serializer-toml.plugin", ".toml" },
{ "json", "carb.dictionary.serializer-json.plugin", ".json" } }
};
return kSupportedConfigTypes;
}
const SupportedConfigInfo* getConfigInfoFromExtension(const char* configExtension)
{
const std::string parmsConfigExt = configExtension;
for (const auto& curConfigInfo : getSupportedConfigTypes())
{
const char* curConfigExtEnd = curConfigInfo.configExt + std::strlen(curConfigInfo.configExt);
if (std::equal(curConfigInfo.configExt, curConfigExtEnd, parmsConfigExt.begin(), parmsConfigExt.end(),
[](char l, char r) { return std::tolower(l) == std::tolower(r); }))
{
return &curConfigInfo;
}
}
return nullptr;
}
const SupportedConfigInfo* getConfigInfoFromFormatName(const char* configFormat)
{
const std::string parmsConfigFormat = configFormat;
for (const auto& curConfigInfo : getSupportedConfigTypes())
{
const char* curConfigFormatEnd = curConfigInfo.configFormatName + std::strlen(curConfigInfo.configFormatName);
if (std::equal(curConfigInfo.configFormatName, curConfigFormatEnd, parmsConfigFormat.begin(),
parmsConfigFormat.end(), [](char l, char r) { return std::tolower(l) == std::tolower(r); }))
{
return &curConfigInfo;
}
}
return nullptr;
}
void selectConfigType(const char* configFormat)
{
m_selectedConfigInfo = getConfigInfoFromFormatName(configFormat);
if (!m_selectedConfigInfo)
{
CARB_LOG_ERROR("Unsupported configuration format: %s. Falling back to %s", configFormat,
getSupportedConfigTypes()[0].configFormatName);
m_selectedConfigInfo = &getSupportedConfigTypes()[0];
}
}
static dictionary::ISerializer* loadConfigSerializerPlugin(const char* const* pluginSearchPaths,
size_t pluginSearchPathCount,
const SupportedConfigInfo* configInfo)
{
if (!configInfo)
{
return nullptr;
}
dictionary::ISerializer* configSerializer =
getFramework()->tryAcquireInterface<dictionary::ISerializer>(configInfo->serializerPluginName);
if (!configSerializer)
{
loadPluginsFromPattern(configInfo->serializerPluginName, pluginSearchPaths, pluginSearchPathCount);
configSerializer =
getFramework()->tryAcquireInterface<dictionary::ISerializer>(configInfo->serializerPluginName);
}
if (!configSerializer)
{
CARB_LOG_ERROR("Couldn't acquire ISerializer interface on startup for parsing '%s' settings.",
configInfo->configFormatName);
}
return configSerializer;
}
void loadSelectedConfigSerializerPlugin(const char* const* pluginSearchPaths, size_t pluginSearchPathCount)
{
m_configSerializer = loadConfigSerializerPlugin(pluginSearchPaths, pluginSearchPathCount, m_selectedConfigInfo);
}
void fixRawStrings(dictionary::Item* combinedConfig)
{
// Fixing the special raw strings
auto rawStringsFixer = [&](dictionary::Item* item, uint32_t elementData, void* userData) {
CARB_UNUSED(elementData, userData);
const char* rawString = getRawStringFromItem(m_idict, item);
if (!rawString)
{
return 0;
}
// buffering the value to be implementation-safe
const std::string value(rawString);
m_idict->setString(item, value.c_str());
return 0;
};
const auto getChildByIndexMutable = [](dictionary::IDictionary* dict, dictionary::Item* item, size_t index) {
return dict->getItemChildByIndexMutable(item, index);
};
dictionary::walkDictionary(m_idict, dictionary::WalkerMode::eIncludeRoot, combinedConfig, 0, rawStringsFixer,
nullptr, getChildByIndexMutable);
}
dictionary::IDictionary* getDictionaryInterface() const
{
return m_idict;
}
dictionary::ISerializer* getConfigSerializerInterface() const
{
return m_configSerializer;
}
settings::ISettings* getSettingsInterface() const
{
return m_settings;
}
dictionary::Item* createEmptyDict(const char* name = "<config>")
{
dictionary::Item* item = m_idict->createItem(nullptr, name, dictionary::ItemType::eDictionary);
if (!item)
{
CARB_LOG_ERROR("Couldn't create empty configuration");
}
return item;
}
private:
filesystem::IFileSystem* m_fs = nullptr;
dictionary::IDictionary* m_idict = nullptr;
dictionary::ISerializer* m_configSerializer = nullptr;
settings::ISettings* m_settings = nullptr;
const SupportedConfigInfo* m_selectedConfigInfo = nullptr;
};
/**
* Helper function to initialize the settings and tokens plugins from different configuration sources
*/
inline void loadSettings(const LoadSettingsHelper::LoadSettingsDesc& settingsDesc)
{
Framework* f = getFramework();
// Preparing settings parameters
LoadSettingsHelper::LoadSettingsDesc params = LoadSettingsHelper::LoadSettingsDesc::getDefault();
params.overwriteWithNonEmptyParams(settingsDesc);
LoadSettingsHelper loadSettingsHelper;
loadSettingsHelper.selectConfigType(params.configFormat);
loadSettingsHelper.loadBaseSettingsPlugins(params.pluginSearchPaths, params.pluginSearchPathCount);
filesystem::IFileSystem* fs = f->acquireInterface<filesystem::IFileSystem>();
tokens::ITokens* tokensInterface = f->tryAcquireInterface<tokens::ITokens>();
// Initializing tokens
if (tokensInterface)
{
const char* kExePathToken = "exe-path";
const char* kExeFilenameToken = "exe-filename";
carb::extras::Path exeFullPath = fs->getExecutablePath();
tokensInterface->setInitialValue(kExePathToken, exeFullPath.getParent().getStringBuffer());
tokensInterface->setInitialValue(kExeFilenameToken, exeFullPath.getFilename().getStringBuffer());
}
settings::ISettings* settings = loadSettingsHelper.getSettingsInterface();
std::string localSpaceConfigFilepath;
std::string customConfigFilepath;
std::string cmdLineConfigFilepath;
if (settings)
{
loadSettingsHelper.loadSelectedConfigSerializerPlugin(params.pluginSearchPaths, params.pluginSearchPathCount);
dictionary::Item* combinedConfig = nullptr;
combinedConfig = loadSettingsHelper.readConfigStages(
params, &localSpaceConfigFilepath, &customConfigFilepath, &cmdLineConfigFilepath);
if (!combinedConfig)
{
dictionary::IDictionary* dictionaryInterface = loadSettingsHelper.getDictionaryInterface();
CARB_LOG_INFO("Using empty configuration for settings as no other sources created it.");
combinedConfig = dictionaryInterface->createItem(nullptr, "<settings>", dictionary::ItemType::eDictionary);
}
if (!combinedConfig)
{
CARB_LOG_ERROR("Couldn't initialize settings because no configuration were created.");
}
else
{
loadSettingsHelper.fixRawStrings(combinedConfig);
// Making the settings from the result dictionary
settings->initializeFromDictionary(combinedConfig);
}
}
else
{
CARB_LOG_INFO("Couldn't acquire ISettings interface on startup to load settings.");
}
// Initializing tokens
if (tokensInterface)
{
const char* kLocalSpaceConfigPathToken = "local-config-path";
const char* kLocalSpaceConfigPathTokenStr = "${local-config-path}";
const char* kCustomConfigPathToken = "custom-config-path";
const char* kCmdLineConfigPathToken = "cli-config-path";
if (!localSpaceConfigFilepath.empty())
{
tokensInterface->setInitialValue(kLocalSpaceConfigPathToken, localSpaceConfigFilepath.c_str());
}
else
{
tokensInterface->setInitialValue(kLocalSpaceConfigPathToken, fs->getCurrentDirectoryPath());
}
if (!customConfigFilepath.empty())
{
tokensInterface->setInitialValue(kCustomConfigPathToken, customConfigFilepath.c_str());
}
else
{
tokensInterface->setInitialValue(kCustomConfigPathToken, kLocalSpaceConfigPathTokenStr);
}
if (!cmdLineConfigFilepath.empty())
{
tokensInterface->setInitialValue(kCmdLineConfigPathToken, cmdLineConfigFilepath.c_str());
}
else
{
tokensInterface->setInitialValue(kCmdLineConfigPathToken, kLocalSpaceConfigPathTokenStr);
}
}
else
{
CARB_LOG_INFO("Couldn't acquire tokens interface and initialize default tokens.");
}
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
} // namespace detail
//! Loads the framework configuration based on a slew of input parameters.
//!
//! First see @ref carb::StartupFrameworkDesc for an idea of the type of data this function accepts.
//!
//! At a high-level this function:
//!
//! - Determines application path from CLI args and env vars (see @ref carb::extras::getAppPathAndName()).
//! - Sets application path as filesystem root
//! - Loads plugins for settings: *carb.settings.plugin*, *carb.dictionary.plugin*, *carb.tokens.plugin* and any
//! serializer plugin.
//! - Searches for config file, loads it and applies CLI args overrides.
//!
//! Rather than this function, consider using @ref OMNI_CORE_INIT(), which handles both starting and shutting down the
//! framework for you in your application.
inline void loadFrameworkConfiguration(const StartupFrameworkDesc& params)
{
Framework* f = getFramework();
const StartupFrameworkDesc& defaultStartupFrameworkDesc = StartupFrameworkDesc::getDefault();
const char* cmdLineParamPrefix = params.cmdLineParamPrefix;
if (!cmdLineParamPrefix)
{
cmdLineParamPrefix = defaultStartupFrameworkDesc.cmdLineParamPrefix;
}
const char* envVarsParamPrefix = params.envVarsParamPrefix;
if (!envVarsParamPrefix)
{
envVarsParamPrefix = defaultStartupFrameworkDesc.envVarsParamPrefix;
}
const char* configFormat = params.configFormat;
if (!configFormat)
{
configFormat = defaultStartupFrameworkDesc.configFormat;
}
char** const argv = params.argv;
const int argc = params.argc;
extras::CmdLineParser cmdLineParser(cmdLineParamPrefix);
cmdLineParser.parse(argv, argc);
const extras::CmdLineParser::Options& args = cmdLineParser.getOptions();
const char* cmdLineConfigPath = nullptr;
bool verboseConfiguration = false;
int32_t startLogLevel = logging::getLogging()->getLevelThreshold();
if (argv && argc > 0)
{
auto findOptionIndex = [=](const char* option) {
for (int i = 0; i < argc; ++i)
{
const char* curArg = argv[i];
if (curArg && !strcmp(curArg, option))
{
return i;
}
}
return -1;
};
auto findOptionValue = [=](const char* option) -> const char* {
const int optionIndex = findOptionIndex(option);
if (optionIndex == -1)
{
return nullptr;
}
if (optionIndex >= argc - 1)
{
CARB_LOG_ERROR("Argument not present for the '%s' option", option);
}
return argv[optionIndex + 1];
};
// Parsing verbose configuration option
const char* const kVerboseConfigKey = "--verbose-config";
verboseConfiguration = findOptionIndex(kVerboseConfigKey) != -1;
if (verboseConfiguration)
{
logging::getLogging()->setLevelThreshold(logging::kLevelVerbose);
}
// Parsing cmd line for "--config-path" argument
const char* const kConfigPathKey = "--config-path";
cmdLineConfigPath = findOptionValue(kConfigPathKey);
if (cmdLineConfigPath)
{
CARB_LOG_INFO("Using '%s' as the value for '%s'", cmdLineConfigPath, kConfigPathKey);
}
// Parsing config format from the command line
const char* kConfigFormatKey = "--config-format";
const char* const configFormatValue = findOptionValue(kConfigFormatKey);
if (configFormatValue)
{
configFormat = configFormatValue;
}
}
carb::extras::EnvironmentVariableParser envVarsParser(envVarsParamPrefix);
envVarsParser.parse();
filesystem::IFileSystem* fs = f->acquireInterface<filesystem::IFileSystem>();
// Prepare application path and name, which will be used to initialize the IFileSystem default root folder,
// and also as one of the variants of configuration file name and location.
std::string appPath, appName;
extras::getAppPathAndName(args, appPath, appName);
// If explicitly specified, override this search logic. This means the application does not expose control over
// the app path and/or app name through settings and env vars.
if (params.appNameOverride)
appName = params.appNameOverride;
if (params.appPathOverride)
appPath = params.appPathOverride;
CARB_LOG_INFO("App path: %s, name: %s", appPath.c_str(), appName.c_str());
// set the application path for the process. This will be one of the locations we search for
// the config file by default.
fs->setAppDirectoryPath(appPath.c_str());
// Loading settings from config and command line.
{
detail::LoadSettingsHelper::LoadSettingsDesc loadSettingsParams =
detail::LoadSettingsHelper::LoadSettingsDesc::getDefault();
loadSettingsParams.appDir = appPath;
loadSettingsParams.appName = appName;
loadSettingsParams.configStringOrPath = params.configString;
loadSettingsParams.cmdLineOptionsMap = &args;
loadSettingsParams.pathwiseEnvOverridesMap = &envVarsParser.getOptions();
loadSettingsParams.envVariablesMap = &envVarsParser.getEnvVariables();
loadSettingsParams.pluginSearchPaths = params.initialPluginsSearchPaths;
loadSettingsParams.pluginSearchPathCount = params.initialPluginsSearchPathCount;
loadSettingsParams.cmdLineConfigPath = cmdLineConfigPath;
loadSettingsParams.configFormat = configFormat;
detail::loadSettings(loadSettingsParams);
}
// restoring the starting log level
if (verboseConfiguration)
{
logging::getLogging()->setLevelThreshold(startLogLevel);
}
}
//! Configures the framework given a slew of input parameters.
//!
//! First see @ref carb::StartupFrameworkDesc for an idea of the type of data this function accepts.
//!
//! At a high-level this function:
//!
//! - Configures logging with config file
//! - Loads plugins according to config file with (see \ref detail::loadPluginsFromConfig())
//! - Configures default plugins according to config file (see \ref detail::setDefaultPluginsFromConfig())
//! - Starts the default profiler (if loaded)
//!
//! Rather than this function, consider using @ref OMNI_CORE_INIT(), which handles both starting and shutting down the
//! framework for you in your application.
inline void configureFramework(const StartupFrameworkDesc& params)
{
Framework* f = getFramework();
if (!params.disableCrashReporter)
{
// Startup the crash reporter
loadPluginsFromPattern(
"carb.crashreporter-*", params.initialPluginsSearchPaths, params.initialPluginsSearchPathCount);
crashreporter::registerCrashReporterForClient();
}
auto settings = f->tryAcquireInterface<carb::settings::ISettings>();
// Configure logging plugin and its default logger
logging::configureLogging(settings);
logging::configureDefaultLogger(settings);
omni::structuredlog::configureStructuredLogging(settings);
// Uploading leftover dumps asynchronously
if (settings != nullptr)
{
if (!params.disableCrashReporter)
{
const char* const kStartupDumpsUploadKey = "/app/uploadDumpsOnStartup";
settings->setDefaultBool(kStartupDumpsUploadKey, true);
if (settings->getAsBool(kStartupDumpsUploadKey))
{
crashreporter::sendAndRemoveLeftOverDumpsAsync();
}
}
// specify the plugin search paths in settings so that loadPluginsFromConfig()
// will have the search paths to look through
const char* kPluginSearchPathsKey = "/pluginSearchPaths";
// only set this if nothing else has been manually set
settings->setDefaultStringArray(
kPluginSearchPathsKey, params.initialPluginsSearchPaths, params.initialPluginsSearchPathCount);
}
// Load plugins using supplied configuration
detail::loadPluginsFromConfig(settings);
// Configure default plugins as present in the config
detail::setDefaultPluginsFromConfig(settings);
#if !CARB_PLATFORM_MACOS // CC-669: avoid registering this on Mac OS since it's unimplemented
// Starting up profiling
// This way of registering profiler allows to enable/disable profiling in the config file, by
// allowing/denying to load profiler plugin.
carb::profiler::registerProfilerForClient();
CARB_PROFILE_STARTUP();
#endif
carb::l10n::registerLocalizationForClient();
}
//! Starts/Configures the framework given a slew of input parameters.
//!
//! First see @ref carb::StartupFrameworkDesc for an idea of the type of data this function accepts.
//!
//! At a high-level this function:
//!
//! - Calls \ref loadFrameworkConfiguration(), which:
//! - Determines application path from CLI args and env vars (see @ref carb::extras::getAppPathAndName()).
//! - Sets application path as filesystem root
//! - Loads plugins for settings: *carb.settings.plugin*, *carb.dictionary.plugin*, *carb.tokens.plugin* and any
//! serializer plugin.
//! - Searches for config file, loads it and applies CLI args overrides.
//! - Calls \ref configureFramework(), which:
//! - Configures logging with config file
//! - Loads plugins according to config file
//! - Configures default plugins according to config file
//! - Starts the default profiler (if loaded)
//!
//! Rather than this function, consider using @ref OMNI_CORE_INIT(), which handles both starting and shutting down the
//! framework for you in your application.
inline void startupFramework(const StartupFrameworkDesc& params)
{
loadFrameworkConfiguration(params);
configureFramework(params);
}
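// A minimal lifecycle sketch (illustrative; assumes the Framework has already
// been acquired, e.g. via OMNI_CORE_INIT(), which performs these steps for you):
//
//     int main(int argc, char** argv)
//     {
//         carb::StartupFrameworkDesc desc = carb::StartupFrameworkDesc::getDefault();
//         desc.argv = argv;
//         desc.argc = argc;
//         carb::startupFramework(desc);
//         // ... run the application ...
//         carb::shutdownFramework();
//         return 0;
//     }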
//! Tears down the Carbonite framework.
//!
//! At a high level, this function:
//! - Shuts down the profiler system (if running)
//! - Calls \ref profiler::deregisterProfilerForClient(), \ref crashreporter::deregisterCrashReporterForClient(), and
//! \ref l10n::deregisterLocalizationForClient().
//!
//! \note It is not necessary to manually call this function if \ref OMNI_CORE_INIT is used, since that macro will
//! ensure that the Framework is released and shut down.
inline void shutdownFramework()
{
CARB_PROFILE_SHUTDOWN();
profiler::deregisterProfilerForClient();
crashreporter::deregisterCrashReporterForClient();
carb::l10n::deregisterLocalizationForClient();
}
} // namespace carb
| 53,299 |
C
| 40.349884 | 128 | 0.641194 |
omniverse-code/kit/include/carb/Types.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Common types used through-out Carbonite.
#pragma once
#include "Interface.h"
#include "Strong.h"
#include "../omni/core/OmniAttr.h"
#include <cstddef>
#include <cstdint>
namespace omni
{
namespace core
{
OMNI_DECLARE_INTERFACE(ITypeFactory) // forward declaration for entry in PluginFrameworkDesc
}
namespace log
{
class ILog; // forward declaration for entry in PluginFrameworkDesc
}
namespace structuredlog
{
class IStructuredLog;
}
} // namespace omni
//! The main Carbonite namespace.
namespace carb
{
//! Defines the plugin hot reloading (auto reload) behavior.
//!
//! @rst
//! .. deprecated:: 132.0
//! Hot reloading support has been removed. No replacement will be provided.
//! @endrst
enum class PluginHotReload
{
eDisabled,
eEnabled
};
/**
* Defines a descriptor for the plugin implementation, to be provided to the macro CARB_PLUGIN_IMPL.
*/
struct PluginImplDesc
{
const char* name; //!< Name of the plugin (e.g. "carb.dictionary.plugin"). Must be globally unique.
const char* description; //!< Helpful text describing the plugin. Use for debugging/tools.
const char* author; //!< Author (e.g. "NVIDIA").
//! If hot reloading is supported by the plugin.
//! @rst
//! .. deprecated:: 132.0
//! Hot reloading support has been removed. No replacement will be provided.
//! @endrst
PluginHotReload hotReload;
const char* build; //!< Build version of the plugin.
};
CARB_ASSERT_INTEROP_SAFE(PluginImplDesc);
//! Defines a struct to be filled by a plugin to provide the framework with all information about it.
//! @note This struct has been superseded by PluginRegistryEntry2 but remains for historical reasons and
//! backwards compatibility. In the past, this struct was filled by the macro CARB_PLUGIN_IMPL.
struct PluginRegistryEntry
{
PluginImplDesc implDesc; //!< Textual information about the plugin (name, desc, etc).
//! Entry in an array of interfaces implemented by the plugin.
struct Interface
{
InterfaceDesc desc; //!< An interface in the plugin.
const void* ptr; //!< Pointer to the interface's `struct`.
size_t size; //!< Size of the interface's `struct`.
};
Interface* interfaces; //!< Pointer to an array of interfaces implemented by the plugin.
size_t interfaceCount; //!< Number of interfaces in the @p interfaces array.
};
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry);
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry::Interface);
//! Defines a struct to be filled by a plugin to provide the framework with all information about it.
//! This struct is automatically created and filled by the macro CARB_PLUGIN_IMPL.
struct PluginRegistryEntry2
{
size_t sizeofThisStruct; //!< Must reflect `sizeof(PluginRegistryEntry2)`; used as a version for this struct.
PluginImplDesc implDesc; //!< Textual information about the plugin (name, desc, etc).
//! Entry in an array of interfaces implemented by the plugin.
struct Interface2
{
size_t sizeofThisStruct; //!< Must reflect `sizeof(Interface2)`; used as a version for this struct.
InterfaceDesc desc; //!< An interface in the plugin.
size_t size; //!< Required size for the interface (must be the maximum size for all supported versions).
size_t align; //!< Required alignment for the interface.
//! Constructor function for this interface within the plugin (auto-generated by \ref CARB_PLUGIN_IMPL).
//!
//! Called by the framework to construct the interface.
//! @param p The buffer (guaranteed to be at least `size` bytes) to construct the interface into.
void(CARB_ABI* Constructor)(void* p);
union
{
//! Destructor function for this interface within the plugin (auto-generated by \ref CARB_PLUGIN_IMPL).
//!
//! This union member is selected if `VersionedConstructor` is `nullptr`.
//!
//! Called by the framework to destruct the interface before unloading the plugin.
//! @param p The buffer previously passed to \ref Constructor that contains the interface.
void(CARB_ABI* Destructor)(void* p);
//! Versioned destructor for this interface within the plugin.
//!
//! This union member is selected if `VersionedConstructor` is not `nullptr`.
//!
//! This function is typically the user-provided function \ref destroyInterface; if that function is not
//! provided no destruction happens.
//! @param v The version of the interface, as set in the `v` parameter for \ref VersionedConstructor before
//! that function returns.
//! @param p The interface buffer that was originally passed to \ref VersionedConstructor.
void(CARB_ABI* VersionedDestructor)(Version v, void* p); //!< Destructor with version
};
//! Versioned constructor function for this interface within the plugin.
//!
//! This function is typically \ref fillInterface(carb::Version*, void*).
//! @warning This function must not fail when `desc.version` is requested.
//!
//! @param v When called, the version requested. Before returning, the function should write the version that is
//! being constructed into \p p.
//! @param p A buffer (guaranteed to be at least `size` bytes) to construct the interface into.
//! @retval `true` if the requested version was available and constructed into \p p.
//! @retval `false` if the requested version is not available.
bool(CARB_ABI* VersionedConstructor)(Version* v, void* p);
// Internal note: This struct can be modified under the same rules as PluginRegistryEntry2 below.
};
Interface2* interfaces; //!< Pointer to an array of interfaces implemented by the plugin.
size_t interfaceCount; //!< Number of interfaces in the @p interfaces array.
// Internal note: This struct can be modified without changing the carbonite framework version, provided that new
// members are only added to the end of the struct and existing members are not modified. The version can then be
// determined by the sizeofThisStruct member. However, if it is modified, please add a new
// carb.frameworktest.*.plugin (see ex2initial in premake5.lua for an example).
};
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry2);
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry2::Interface2);
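// Illustrative sketch of the VersionedConstructor contract documented above. This is a
// hypothetical caller, not the actual framework implementation:
//
//   bool constructInterface(const PluginRegistryEntry2::Interface2& iface, Version requested, void* buf)
//   {
//       Version v = requested;                    // version the caller wants
//       if (!iface.VersionedConstructor(&v, buf)) // buf must be at least iface.size bytes
//           return false;                         // requested version unavailable
//       // On success, v holds the version actually constructed into buf; it is later
//       // passed back to VersionedDestructor(v, buf) before the plugin unloads.
//       return true;
//   }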
/**
* Defines a struct which contains all key information about a plugin loaded into memory.
*/
struct PluginDesc
{
PluginImplDesc impl; //!< Name, description, etc.
const InterfaceDesc* interfaces; //!< Array of interfaces implemented by the plugin.
size_t interfaceCount; //!< Number of interfaces implemented by the plugin.
const InterfaceDesc* dependencies; //!< Array of interfaces on which the plugin depends.
size_t dependencyCount; //!< Number of interfaces on which the plugin depends.
const char* libPath; //!< File from which the plugin was loaded.
};
CARB_ASSERT_INTEROP_SAFE(PluginDesc);
//! Lets clients of a plugin know both just before and just after that the plugin is being reloaded.
enum class PluginReloadState
{
eBefore, //!< The plugin is about to be reloaded.
eAfter //!< The plugin has been reloaded.
};
//! Pass to each plugin's @ref OnPluginRegisterExFn during load. Allows the plugin to grab global Carbonite state such
//! as the @ref carb::Framework singleton.
struct PluginFrameworkDesc
{
struct Framework* framework; //!< Owning carb::Framework. Never `nullptr`.
omni::core::ITypeFactory* omniTypeFactory; //!< omni::core::ITypeFactory singleton. May be `nullptr`.
omni::log::ILog* omniLog; //!< omni::log::ILog singleton. May be `nullptr`.
omni::structuredlog::IStructuredLog* omniStructuredLog; //!< omni::structuredlog::IStructuredLog singleton. May be
//!< `nullptr`.
//! Reserved space for future fields. If a new field is added, subtract 1 from this array.
//!
//! The fields above must never be removed, though newer implementations of carb.dll may decide to populate them
//! with nullptr.
//!
//! When a newer plugin is loaded by an older carb.dll, these fields will be nullptr. It is up to the newer plugin
//! (really CARB_PLUGIN_IMPL_WITH_INIT()) to handle this.
void* Reserved[28];
};
static_assert(sizeof(PluginFrameworkDesc) == (sizeof(void*) * 32),
"sizeof(PluginFrameworkDesc) is unexpected. did you add a new field improperly?"); // contact ncournia for
// questions
/**
* Defines a shared object handle.
*/
struct CARB_ALIGN_AS(8) SharedHandle
{
union
{
void* handlePointer; ///< A user-defined pointer.
void* handleWin32; ///< A Windows/NT HANDLE. Defined as void* instead of "HANDLE" to avoid requiring windows.h.
int handleFd; ///< A file descriptor (FD), POSIX handle.
};
};
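// Illustrative sketch: populating a SharedHandle per platform. The union members are
// documented above; the helper below is an assumption about typical usage.
//
//   SharedHandle wrapFd(int fd)
//   {
//       SharedHandle h{};
//       h.handleFd = fd; // POSIX file descriptor; use handleWin32 for an NT HANDLE
//       return h;
//   }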
//! @defgroup CarbonitePluginExports Functions exported from Carbonite plugins. Use @ref CARB_PLUGIN_IMPL to have
//! reasonable default implementations of these functions generated for you in your plugin.
//! Required. Returns the plugin's required @ref carb::Framework version.
//!
//! Use @ref CARB_PLUGIN_IMPL to have this function generated for your plugin.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbGetFrameworkVersion
typedef Version(CARB_ABI* GetFrameworkVersionFn)();
//! Either this, OnPluginRegisterExFn, or OnPluginRegisterEx2Fn is required. Populates the given @ref
//! carb::PluginRegistryEntry with the plugin's information.
//!
//! Prefer using @ref OnPluginRegisterExFn instead of this function.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginRegister
typedef void(CARB_ABI* OnPluginRegisterFn)(Framework* framework, PluginRegistryEntry* outEntry);
//! Either this, OnPluginRegisterFn, or OnPluginRegisterEx2Fn is required. Populates the given @ref
//! carb::PluginRegistryEntry with the plugin's information.
//!
//! Use @ref CARB_PLUGIN_IMPL to have this function generated for your plugin.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginRegisterEx
typedef void(CARB_ABI* OnPluginRegisterExFn)(PluginFrameworkDesc* framework, PluginRegistryEntry* outEntry);
//! Either this, OnPluginRegisterFn, or OnPluginRegisterExFn is required. Populates the given
//! carb::PluginRegistryEntry2 with the plugin's information.
//!
//! Use @ref CARB_PLUGIN_IMPL to have this function generated for your plugin.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginRegisterEx2
typedef void(CARB_ABI* OnPluginRegisterEx2Fn)(PluginFrameworkDesc* framework, PluginRegistryEntry2* outEntry);
//! Optional. Called after @ref OnPluginRegisterExFn.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginPreStartup
typedef void(CARB_ABI* OnPluginPreStartupFn)();
//! Optional. Called after @ref OnPluginPreStartupFn.
//!
//! Prefer using @ref OnPluginStartupExFn instead of this function, since @ref OnPluginStartupExFn returns a value
//! that can cause the plugin to be unloaded on failure.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginStartup
typedef void(CARB_ABI* OnPluginStartupFn)();
//! Optional. Called after @ref OnPluginPreStartupFn.
//!
//! This is the main user defined function for running startup code in your plugin.
//!
//! @returns Returns `true` if the startup was successful. If `false` is returned, the plugin will be immediately
//! unloaded (only @ref OnPluginPostShutdownFn is called).
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginStartupEx
typedef bool(CARB_ABI* OnPluginStartupExFn)();
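// Illustrative sketch: a plugin-side startup function matching OnPluginStartupExFn. The
// exported name carbOnPluginStartupEx is taken from the @see reference above; CARB_EXPORT
// is assumed to be available from the Carbonite headers.
//
//   CARB_EXPORT bool CARB_ABI carbOnPluginStartupEx()
//   {
//       // ... initialize plugin state ...
//       return true; // returning false unloads the plugin immediately
//   }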
//! Optional. Called after @ref OnPluginStartupExFn.
//!
//! Called when the @ref carb::Framework is unloading the plugin. If the framework is released with
//! carb::quickReleaseFrameworkAndTerminate() and OnPluginQuickShutdownFn is available for the plugin, this
//! function is not called.
//!
//! This is the main user defined function for running shutdown code in your plugin.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginShutdown
typedef void(CARB_ABI* OnPluginShutdownFn)();
//! Optional. Called in lieu of OnPluginShutdownFn, if provided, when @ref carb::quickReleaseFrameworkAndTerminate()
//! is performing a quick shutdown.
//!
//! This function should save any state necessary, and close and flush any I/O, returning as quickly as possible. This
//! function is not called if the plugin is unloaded normally or through carb::releaseFramework().
//!
//! @note If carb::quickReleaseFrameworkAndTerminate() is called, OnPluginQuickShutdownFn is called if it is available.
//! If the function does not exist, OnPluginShutdownFn is called instead. OnPluginPostShutdownFn is always called.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginQuickShutdown
typedef void(CARB_ABI* OnPluginQuickShutdownFn)();
//! Optional. Called after @ref OnPluginShutdownFn.
//!
//! Called when the @ref carb::Framework is unloading the plugin.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginPostShutdown
typedef void(CARB_ABI* OnPluginPostShutdownFn)();
//! Optional. Returns a static list of interfaces this plugin depends upon.
//!
//! Use @ref CARB_PLUGIN_IMPL_DEPS to have this function generated for you.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbGetPluginDeps
typedef void(CARB_ABI* GetPluginDepsFn)(InterfaceDesc** interfaceDesc, size_t* count);
//! Optional.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnReloadDependency
typedef void(CARB_ABI* OnReloadDependencyFn)(PluginReloadState reloadState, void* pluginInterface, PluginImplDesc desc);
//! Two component `float` vector.
struct Float2
{
float x; //!< x-component
float y; //!< y-component
};
//! Three component `float` vector.
struct Float3
{
float x; //!< x-component
float y; //!< y-component
float z; //!< z-component
};
//! Four component `float` vector.
struct Float4
{
float x; //!< x-component
float y; //!< y-component
float z; //!< z-component
float w; //!< w-component
};
//! Two component `double` vector.
struct Double2
{
double x; //!< x-component
double y; //!< y-component
};
//! Three component `double` vector.
struct Double3
{
double x; //!< x-component
double y; //!< y-component
double z; //!< z-component
};
//! Four component `double` vector.
struct Double4
{
double x; //!< x-component
double y; //!< y-component
double z; //!< z-component
double w; //!< w-component
};
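// Illustrative sketch: these vector types are plain aggregates, so brace initialization
// works directly (names and values here are arbitrary):
//
//   Float3 position{ 1.0f, 2.0f, 3.0f };
//   Double2 uv{ 0.25, 0.75 };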
//! RGBA color with templated data type.
template <typename T>
struct Color
{
T r; //!< Red
T g; //!< Green
T b; //!< Blue
T a; //!< Alpha (transparency)
};
//! RGB `float` color.
struct ColorRgb
{
float r; //!< Red
float g; //!< Green
float b; //!< Blue
};
//! RGBA `float` color.
struct ColorRgba
{
float r; //!< Red
float g; //!< Green
float b; //!< Blue
float a; //!< Alpha (transparency)
};
//! RGB `double` color.
struct ColorRgbDouble
{
double r; //!< Red
double g; //!< Green
double b; //!< Blue
};
//! RGBA `double` color.
struct ColorRgbaDouble
{
double r; //!< Red
double g; //!< Green
double b; //!< Blue
double a; //!< Alpha (transparency)
};
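// Illustrative sketch: the color types are aggregates as well, and Color<T> allows other
// channel types (example values are arbitrary):
//
//   ColorRgba opaqueRed{ 1.0f, 0.0f, 0.0f, 1.0f };
//   Color<uint8_t> packedRed{ 255, 0, 0, 255 };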
//! Two component `int32_t` vector.
struct Int2
{
int32_t x; //!< x-component
int32_t y; //!< y-component
};
//! Three component `int32_t` vector.
struct Int3
{
int32_t x; //!< x-component
int32_t y; //!< y-component
int32_t z; //!< z-component
};
//! Four component `int32_t` vector.
struct Int4
{
int32_t x; //!< x-component
int32_t y; //!< y-component
int32_t z; //!< z-component
int32_t w; //!< w-component
};
//! Two component `uint32_t` vector.
struct Uint2
{
uint32_t x; //!< x-component
uint32_t y; //!< y-component
};
//! Three component `uint32_t` vector.
struct Uint3
{
uint32_t x; //!< x-component
uint32_t y; //!< y-component
uint32_t z; //!< z-component
};
//! Four component `uint32_t` vector.
struct Uint4
{
uint32_t x; //!< x-component
uint32_t y; //!< y-component
uint32_t z; //!< z-component
uint32_t w; //!< w-component
};
//! A representation that can combine four character codes into a single 32-bit value for quick comparison.
//! @see CARB_MAKE_FOURCC
using FourCC = uint32_t;
//! A macro for producing a carb::FourCC value from four characters.
#define CARB_MAKE_FOURCC(a, b, c, d) \
((FourCC)(uint8_t)(a) | ((FourCC)(uint8_t)(b) << 8) | ((FourCC)(uint8_t)(c) << 16) | ((FourCC)(uint8_t)(d) << 24))
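// Illustrative sketch: building a FourCC at compile time with the macro above. The first
// character lands in the lowest byte:
//
//   constexpr FourCC kCarbTag = CARB_MAKE_FOURCC('C', 'A', 'R', 'B');
//   static_assert(kCarbTag == 0x42524143u, "'C' | 'A'<<8 | 'R'<<16 | 'B'<<24");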
/**
* Timeout constant
*/
constexpr uint32_t kTimeoutInfinite = CARB_UINT32_MAX;
//! A handle type for \ref Framework::addLoadHook() and \ref Framework::removeLoadHook()
CARB_STRONGTYPE(LoadHookHandle, size_t);
//! A value indicating an invalid load hook handle.
constexpr LoadHookHandle kInvalidLoadHook{};
//! An enum that describes a binding registration for \ref carb::Framework::registerScriptBinding().
enum class BindingType : uint32_t
{
Owner, //!< The given client owns a script language; any interfaces acquired within the script language will be
//!< considered as dependencies of the script language.
Binding, //!< The given client is a binding for the given script language. Any interfaces acquired by the binding
//!< will be considered as dependencies of all owners of the script language.
};
} // namespace carb
// these types used to be in this file but didn't really belong. we continue to include these type in this file for
// backward-compat.
#include "RenderingTypes.h"
| 18,881 |
C
| 34.293458 | 120 | 0.689158 |
omniverse-code/kit/include/carb/RString.h
|
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Registered String utility. See carb::RString for more info.
#pragma once
#include "Defines.h"
#define RSTRINGENUM_FROM_RSTRING_H
#include "RStringEnum.inl"
#undef RSTRINGENUM_FROM_RSTRING_H
#include <memory> // for std::owner_before
#include <ostream> // for std::basic_ostream
#include <stdint.h>
#include <string>
#include <typeindex> // for std::hash
namespace carb
{
//! Operations for RString (and variant classes) constructor.
enum class RStringOp
{
//! Attempt to find a matching registered string, or register a new string if not found.
eRegister,
//! Only attempt to find a matching registered string. If the string cannot be found, the RString will be empty and
//! will return `true` to RString::isEmpty().
eFindExisting,
};
//! Internal definition detail.
namespace detail
{
//! @private
struct RStringBase
{
//! @private
CARB_VIZ uint32_t m_stringId : 31;
//! @private
unsigned m_uncased : 1;
};
//! @private
// NOTE: In order to satisfy the StandardLayoutType named requirement (required for ABI safety), all non-static data
// members and bit-fields must be declared in the same class. As such, this class must match RStringBase, but cannot
// inherit from RStringBase.
struct RStringKeyBase
{
//! @private
CARB_VIZ uint32_t m_stringId : 31;
//! @private
unsigned m_uncased : 1;
//! @private
CARB_VIZ int32_t m_number;
};
// Validate assumptions
CARB_ASSERT_INTEROP_SAFE(RStringBase);
CARB_ASSERT_INTEROP_SAFE(RStringKeyBase);
static_assert(offsetof(RStringKeyBase, m_number) == sizeof(RStringBase), "Offset error");
/**
* The base class for all registered string classes: RString, RStringU, RStringKey, and RStringUKey.
*
* @tparam Uncased `true` if representing an "un-cased" (i.e. case-insensitive) registered string; `false` otherwise.
*/
template <bool Uncased, class Base = RStringBase>
class RStringTraits : protected Base
{
public:
/**
* Constant that indicates whether this is "un-cased" (i.e. case-insensitive).
*/
static constexpr bool IsUncased = Uncased;
//! @private
constexpr RStringTraits() noexcept;
//! @private
constexpr RStringTraits(eRString staticString) noexcept;
//! @private
RStringTraits(const char* str, RStringOp op);
//! @private
RStringTraits(const char* str, size_t len, RStringOp op);
//! @private
RStringTraits(const std::string& str, RStringOp op);
//! @private
RStringTraits(uint32_t stringId) noexcept;
/**
* Checks to see if this registered string has been corrupted.
*
* @note It is not possible for this registered string to become corrupted through normal use of the API. It could
* be caused by bad casts or use-after-free.
*
* @returns `true` if `*this` represents a valid registered string; `false` if `*this` is corrupted.
*/
bool isValid() const noexcept;
/**
* Checks to see if this registered string represents the "" (empty) value.
*
* @returns `true` if `*this` is default-initialized or initialized to eRString::Empty; `false` otherwise.
*/
constexpr bool isEmpty() const noexcept;
/**
* Checks to see if this registered string represents an "un-cased" (i.e. case-insensitive) registered string.
*
* @returns `true` if `*this` is "un-cased" (i.e. case-insensitive); `false` if case-sensitive.
*/
constexpr bool isUncased() const noexcept;
/**
* Returns the registered string ID. This ID is only useful for debugging purposes and should not be used for
* comparisons.
*
* @returns The string ID for this registered string.
*/
constexpr uint32_t getStringId() const noexcept;
/**
* Returns the hash value as by `carb::hashString(this->c_str())`.
*
* @note This value is computed once for a registered string and cached, so this operation is generally very fast.
*
* @returns The hash value as computed by `carb::hashString(this->c_str())`.
*/
size_t getHash() const;
/**
* Returns the hash value as by `carb::hashLowercaseString(this->c_str())`.
*
* @note This value is pre-computed for registered strings and cached, so this operation is always O(1).
*
* @returns The hash value as computed by `carb::hashLowercaseString(this->c_str())`.
*/
size_t getUncasedHash() const noexcept;
/**
* Resolves this registered string to a C-style NUL-terminated string.
*
* @note This operation is O(1).
*
* @returns The C-style string previously registered.
*/
const char* c_str() const noexcept;
/**
* An alias for c_str(); resolves this registered string to a C-style NUL-terminated string.
*
* @note This operation is O(1).
*
* @returns The C-style string previously registered.
*/
const char* data() const noexcept;
/**
* Returns the length of the registered string. If the string contains embedded NUL ('\0') characters, this may
* differ from `std::strlen(c_str())`.
*
* @note This operation is O(1).
*
* @returns The length of the registered string not including the NUL terminator.
*/
size_t length() const noexcept;
#ifndef DOXYGEN_BUILD
/**
* Resolves this registered string to a `std::string`.
*
* @returns A `std::string` containing a copy of the previously registered string.
*/
std::string toString() const;
#endif
/**
* Equality comparison between this registered string and another.
*
* @param other Another registered string.
* @returns `true` if `*this` and `other` represent the same registered string; `false` otherwise.
*/
bool operator==(const RStringTraits& other) const;
/**
* Inequality comparison between this registered string and another.
*
* @param other Another registered string.
* @returns `false` if `*this` and `other` represent the same registered string; `true` otherwise.
*/
bool operator!=(const RStringTraits& other) const;
/**
* Checks whether this registered string is stably (but not lexicographically) ordered before another registered
* string.
*
* This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
*
* @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
* between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
* While a lexicographical comparison would be O(n), this comparison is O(1).
*
* @param other Another registered string.
* @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
*/
bool owner_before(const RStringTraits& other) const;
/**
* Lexicographically compares this registered string with another.
*
* @note If either `*this` or @p other is "un-cased" (i.e. case-insensitive), a case-insensitive compare is
* performed.
*
* @tparam OtherUncased `true` if @p other is "un-cased" (i.e. case-insensitive); `false` otherwise.
* @param other Another registered string to compare against.
* @returns `0` if the strings are equal, `>0` if @p other is lexicographically ordered before `*this`, or `<0` if
* `*this` is lexicographically ordered before @p other. See note above regarding case-sensitivity.
*/
template <bool OtherUncased, class OtherBase>
int compare(const RStringTraits<OtherUncased, OtherBase>& other) const;
/**
* Lexicographically compares this registered string with a C-style string.
*
* @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
*
* @param s A C-style string to compare against.
* @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before `*this`, or `<0` if
* `*this` is lexicographically ordered before @p s. See note above regarding case-sensitivity.
*/
int compare(const char* s) const;
/**
* Lexicographically compares a substring of this registered string with a C-style string.
*
* @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
*
* @param pos The starting offset of the registered string represented by `*this`. Must be less-than-or-equal-to
* the length of the registered string.
* @param count The length from @p pos to use in the comparison. This value is automatically clamped to the end of
* the registered string.
* @param s A C-style string to compare against.
* @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before the substring of `*this`,
* or `<0` if the substring of `*this` is lexicographically ordered before @p s. See note above regarding
* case-sensitivity.
*/
int compare(size_t pos, size_t count, const char* s) const;
/**
* Lexicographically compares a substring of this registered string with a C-style string.
*
* @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
*
* @param pos The starting offset of the registered string represented by `*this`. Must be less-than-or-equal-to
* the length of the registered string.
* @param count The length from @p pos to use in the comparison. This value is automatically clamped to the end of
* the registered string.
* @param s A C-style string to compare against.
* @param len The number of characters of @p s to compare against.
* @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before the substring of `*this`,
* or `<0` if the substring of `*this` is lexicographically ordered before @p s. See note above regarding
* case-sensitivity.
*/
int compare(size_t pos, size_t count, const char* s, size_t len) const;
/**
* Lexicographically compares this registered string with a string.
*
* @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
*
* @param s A string to compare against.
* @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before `*this`, or `<0` if
* `*this` is lexicographically ordered before @p s. See note above regarding case-sensitivity.
*/
int compare(const std::string& s) const;
/**
* Lexicographically compares a substring of this registered string with a string.
*
* @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
*
* @param pos The starting offset of the registered string represented by `*this`. Must be less-than-or-equal-to
* the length of the registered string.
* @param count The length from @p pos to use in the comparison. This value is automatically clamped to the end of
* the registered string.
* @param s A string to compare against.
* @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before the substring of `*this`,
* or `<0` if the substring of `*this` is lexicographically ordered before @p s. See note above regarding
* case-sensitivity.
*/
int compare(size_t pos, size_t count, const std::string& s) const;
};
} // namespace detail
class RString;
class RStringU;
class RStringKey;
class RStringUKey;
/**
* Carbonite registered strings.
*
* The Carbonite framework has a rich <a href="https://en.wikipedia.org/wiki/String_interning">string-interning</a>
* interface that is very easily used through the RString (and other) classes. This implements a <a
* href="https://en.wikipedia.org/wiki/Flyweight_pattern">Flyweight pattern</a> for strings. The registered string
* interface is fully @rstref{ABI-safe <abi-compatibility>} due to versioning, and can even be used in an application
* prior to the `main()`, `WinMain()` or `DllMain()` functions being called. Furthermore, the API is fully thread-safe.
*
* Registered strings have pre-computed hashes which make them ideal for identifiers and map keys, and string
* (in-)equality checks are O(1) constant time. For ordered containers, registered strings have an `owner_before()`
* function that can be used for stable (though not lexicographical) ordering. If lexicographical ordering is desired,
* O(n) `compare()` functions are provided.
*
* Variations exist around case-sensitivity. The RStringU class (the U stands for "un-cased" which is used in this API
* to denote case-insensitivity) is used to register a string that will compare in a case-insensitive manner. Although
* RString and RStringU cannot be directly compared for equality, RString::toUncased() exists to explicitly create a
* case-insensitive RStringU from an RString which can then be compared.
*
* Variations also exist around using registered strings as a key value. It can be useful to have an associated number
* to denote multiple instances of a registered string: hence the RStringKey and RStringUKey classes.
*
* To register a string, pass a string to the RString constructor RAII-style. Strings that are registered stay as such
* for the entire run of the application; strings are never unregistered. Registered strings are stored in a named
* section of shared memory accessible by all modules loaded by an application. The memory for registered strings is
* allocated directly from the operating system to avoid cross-DLL heap issues.
*
* @note Registered strings are a limited resource, but there exist slots for approximately two million strings.
*
* Variations:
* * RStringU - an "un-cased" (i.e. case-insensitive) registered string
* * RStringKey - Adds a numeric component to RString to create an identifier or key.
* * RStringUKey - Adds a numeric component to RStringU to create an identifier or key that is case-insensitive.
*/
class CARB_VIZ RString final : public detail::RStringTraits<false>
{
using Base = detail::RStringTraits<false>;
public:
/**
* Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `false`).
*/
using Base::IsUncased;
/**
* Default constructor. isEmpty() will report `true`.
*/
constexpr RString() noexcept;
/**
* Initializes this registered string to one of the static pre-defined registered strings.
* @param staticString The pre-defined registered string to use.
*/
constexpr RString(eRString staticString) noexcept;
/**
* Finds or registers a new string.
* @param str The string to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RString(const char* str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new counted string.
* @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
* characters.
* @param str The string to find or register.
* @param len The number of characters of @p str to include.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RString(const char* str, size_t len, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new `std::string`.
* @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
* well.
* @param str The `std::string` to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RString(const std::string& str, RStringOp op = RStringOp::eRegister);
/**
* Truncates RStringKey into only the registered string portion.
* @param other The RStringKey to truncate.
*/
explicit RString(const RStringKey& other) noexcept;
/**
* Converts this registered string into an "un-cased" (i.e. case-insensitive) registered string.
*
* @note The returned string may differ in case to `*this` when retrieved with c_str() or toString().
*
* @returns An "un-cased" (i.e. case-insensitive) string that matches `*this` when compared in a case-insensitive
* manner.
*/
RStringU toUncased() const noexcept;
/**
* Returns a copy of this registered string.
* @note This function exists for compatibility with the RStringKey interface.
* @returns `*this` since this string already has no number component.
*/
RString truncate() const noexcept;
/**
* Appends a number to the registered string to form a RStringKey.
*
* @param number An optional number to append (default = `0`).
* @returns An RStringKey based on `*this` and the provided number.
*/
RStringKey toRStringKey(int32_t number = 0) const;
/**
* Equality comparison between this registered string and another.
*
* @param other Another registered string.
* @returns `true` if `*this` and `other` represent the same registered string; `false` otherwise.
*/
bool operator==(const RString& other) const noexcept;
/**
* Inequality comparison between this registered string and another.
*
* @param other Another registered string.
* @returns `false` if `*this` and `other` represent the same registered string; `true` otherwise.
*/
bool operator!=(const RString& other) const noexcept;
/**
* Checks whether this registered string is stably (but not lexicographically) ordered before another registered
* string.
*
* This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
*
* @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
* between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
* While a lexicographical comparison would be O(n), this comparison is O(1).
*
* @param other Another registered string.
* @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
*/
bool owner_before(const RString& other) const noexcept;
};
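// Illustrative usage sketch (the strings are arbitrary; all calls are documented above):
//
//   RString name("carb.dictionary.plugin");   // registers (or finds) the string
//   RString same("carb.dictionary.plugin");
//   bool equal = (name == same);              // true; O(1) comparison
//   int order = name.compare("carb.events");  // lexicographic compare, O(n)
//   RString missing("never.registered", RStringOp::eFindExisting);
//   bool empty = missing.isEmpty();           // true if the string was never registered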
/**
* Case-insensitive registered string.
*
* The "U" stands for "un-cased".
*
* See RString for system-level information. This class differs from RString in that it performs case-insensitive
* operations.
*
* Since the desire is for equality comparisons to be speed-of-light (i.e. O(1) numeric comparisons), the first string
* registered for a given case-insensitive spelling is chosen as the "un-cased authority"; any strings registered
* through RStringU later that match that string (in a case-insensitive manner) resolve to that authority string
* instead. This also means that when RStringU is used to register a string and that string is then retrieved with
* RStringU::c_str(), the casing in the returned string might not match what was registered.
*/
class CARB_VIZ RStringU final : public detail::RStringTraits<true>
{
using Base = detail::RStringTraits<true>;
public:
/**
* Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `true`).
*/
using Base::IsUncased;
/**
* Default constructor. isEmpty() will report `true`.
*/
constexpr RStringU() noexcept;
/**
* Initializes this registered string to one of the static pre-defined registered strings.
* @param staticString The pre-defined registered string to use.
*/
constexpr RStringU(eRString staticString) noexcept;
/**
* Finds or registers a new case-insensitive string.
*
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
*
* @param str The string to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringU(const char* str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new counted case-insensitive string.
* @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
* characters.
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
* @param str The string to find or register.
* @param len The number of characters of @p str to include.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringU(const char* str, size_t len, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new case-insensitive `std::string`.
* @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
* well.
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
* @param str The `std::string` to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringU(const std::string& str, RStringOp op = RStringOp::eRegister);
/**
* Converts a registered string into an "un-cased" (i.e. case-insensitive) registered string.
* @param other The RString to convert.
*/
explicit RStringU(const RString& other);
/**
* Truncates RStringUKey into only the registered string portion.
* @param other The RStringUKey to truncate.
*/
explicit RStringU(const RStringUKey& other);
/**
* Returns a copy of this registered string.
* @note This function exists for compatibility with the RString interface.
* @returns `*this` since this string is already "un-cased" (i.e. case-insensitive).
*/
RStringU toUncased() const noexcept;
/**
* Returns a copy of this registered string.
* @note This function exists for compatibility with the RStringKey interface.
* @returns `*this` since this string already has no number component.
*/
RStringU truncate() const noexcept;
/**
* Appends a number to the registered string to form a RStringUKey.
*
* @param number An optional number to append (default = `0`).
* @returns An RStringUKey based on `*this` and the provided number.
*/
RStringUKey toRStringKey(int32_t number = 0) const;
/**
* Equality comparison between this registered string and another.
*
* @note A case-insensitive compare is performed.
*
* @param other Another registered string.
* @returns `true` if `*this` and `other` represent the same registered string; `false` otherwise.
*/
bool operator==(const RStringU& other) const noexcept;
/**
* Inequality comparison between this registered string and another.
*
* @note A case-insensitive compare is performed.
*
* @param other Another registered string.
* @returns `false` if `*this` and `other` represent the same registered string; `true` otherwise.
*/
bool operator!=(const RStringU& other) const noexcept;
/**
* Checks whether this registered string is stably (but not lexicographically) ordered before another registered
* string.
*
* This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
*
* @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
* between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
* While a lexicographical comparison would be O(n), this comparison is O(1).
*
* @param other Another registered string.
* @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
*/
bool owner_before(const RStringU& other) const noexcept;
};
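// Illustrative sketch of case-insensitive behavior (arbitrary strings):
//
//   RStringU a("Hello");
//   RStringU b("HELLO");
//   bool equal = (a == b); // true: comparison is case-insensitive
//   // a.c_str() may return either casing, depending on which spelling became the
//   // "un-cased authority" described above.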
/**
* A registered string key.
*
* See \ref RString for high-level information about the registered string system.
*
* RStringKey is formed by appending a numeric component to a registered string. This numeric component can be used as a
* unique instance identifier alongside the registered string. Additionally, the RStringKey::toString() function will
* append a non-zero numeric component following an underscore.
*/
class CARB_VIZ RStringKey final : public detail::RStringTraits<false, detail::RStringKeyBase>
{
using Base = detail::RStringTraits<false, detail::RStringKeyBase>;
public:
/**
* Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `false`).
*/
using Base::IsUncased;
/**
* Default constructor. isEmpty() will report `true` and getNumber() will return `0`.
*/
constexpr RStringKey() noexcept;
/**
* Initializes this registered string to one of the static pre-defined registered strings.
* @param staticString The pre-defined registered string to use.
* @param number The number that will be returned by getNumber().
*/
constexpr RStringKey(eRString staticString, int32_t number = 0) noexcept;
/**
* Finds or registers a new string.
* @param str The string to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringKey(const char* str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new string with a given number component.
* @param number The number that will be returned by getNumber().
* @param str The string to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
RStringKey(int32_t number, const char* str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new counted string.
* @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
* characters.
* @param str The string to find or register.
* @param len The number of characters of @p str to include.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringKey(const char* str, size_t len, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new counted string with a given number component.
* @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
* characters.
* @param number The number that will be returned by getNumber().
* @param str The string to find or register.
* @param len The number of characters of @p str to include.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringKey(int32_t number, const char* str, size_t len, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new `std::string`.
* @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
* well.
* @param str The `std::string` to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringKey(const std::string& str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new `std::string` with a number component.
* @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
* well.
* @param number The number that will be returned by getNumber().
* @param str The `std::string` to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringKey(int32_t number, const std::string& str, RStringOp op = RStringOp::eRegister);
/**
* Appends a number component to a registered string to form a key.
* @param str The registered string to decorate.
* @param number The number that will be returned by getNumber().
*/
RStringKey(const RString& str, int32_t number = 0);
/**
* Converts this registered string key into an "un-cased" (i.e. case-insensitive) registered string key.
*
* @note The returned string may differ in case to `*this` when retrieved with c_str() or toString().
*
* @returns An "un-cased" (i.e. case-insensitive) string that matches `*this` when compared in a case-insensitive
* manner. The returned registered string key will have the same number component as `*this`.
*/
RStringUKey toUncased() const noexcept;
/**
* Returns a registered string without the number component.
* @returns A registered string that matches `*this` without a number component.
*/
RString truncate() const noexcept;
/**
* Equality comparison between this registered string key and another.
*
* @param other Another registered string.
* @returns `true` if `*this` and `other` represent the same registered string and have matching number components;
* `false` otherwise.
*/
bool operator==(const RStringKey& other) const noexcept;
/**
* Inequality comparison between this registered string key and another.
*
* @param other Another registered string.
* @returns `false` if `*this` and `other` represent the same registered string and have matching number components;
* `true` otherwise.
*/
bool operator!=(const RStringKey& other) const noexcept;
/**
* Checks whether this registered string key is stably (but not lexicographically) ordered before another registered
* string. The number component is also compared and keys with a lower number component will be ordered before.
*
* This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
*
* @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
* between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
* While a lexicographical comparison would be O(n), this comparison is O(1).
*
* @param other Another registered string.
* @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
*/
bool owner_before(const RStringKey& other) const noexcept;
#ifndef DOXYGEN_BUILD // Sphinx warns about Duplicate C++ declaration
/**
* Returns the hash value as by `carb::hashString(this->truncate().c_str())` combined with the number component.
*
* @note This value is computed once for a registered string and cached, so this operation is generally very fast.
*
* @returns The hash value as computed by `carb::hashString(this->truncate().c_str())`.
*/
size_t getHash() const;
/**
* Returns the hash value as by `carb::hashLowercaseString(this->truncate().c_str())` combined with the number
* component.
*
* @note This value is pre-computed for registered strings and cached, so this operation is always O(1).
*
* @returns The hash value as computed by `carb::hashLowercaseString(this->truncate().c_str())`.
*/
size_t getUncasedHash() const noexcept;
#endif
/**
* Returns a string containing the registered string, and if getNumber() is not zero, the number appended.
*
* Example: RStringKey(eRString::RS_carb, 1).toString() would produce "carb_1".
* @returns A string containing the registered string. If getNumber() is non-zero, an underscore and the number are
* appended.
*/
std::string toString() const;
/**
* Returns the number component of this key.
* @returns The number component previously specified in the constructor or with setNumber() or via number().
*/
int32_t getNumber() const noexcept;
/**
* Sets the number component of this key.
* @param num The new number component.
*/
void setNumber(int32_t num) noexcept;
/**
* Direct access to the number component for manipulation or atomic operations via `atomic_ref`.
* @returns A reference to the number component.
*/
int32_t& number() noexcept;
private:
// Hide these functions since they are incomplete
using Base::c_str;
using Base::data;
using Base::length;
};
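// Illustrative sketch of the number component (values are arbitrary):
//
//   RStringKey key(RString("node"), 2);
//   int32_t n = key.getNumber();    // 2
//   std::string s = key.toString(); // "node_2" per the underscore convention above
//   RString base = key.truncate();  // registered string without the number component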
/**
* A case-insensitive registered string key.
*
* See \ref RString for high-level information about the registered string system.
*
* RStringUKey is formed by appending a numeric component to an "un-cased" (i.e. case-insensitive) registered string.
* This numeric component can be used as a unique instance identifier alongside the registered string. Additionally, the
* RStringUKey::toString() function will append a non-zero numeric component following an underscore.
*/
class CARB_VIZ RStringUKey final : public detail::RStringTraits<true, detail::RStringKeyBase>
{
using Base = detail::RStringTraits<true, detail::RStringKeyBase>;
public:
/**
* Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `true`).
*/
using Base::IsUncased;
/**
* Default constructor. isEmpty() will report `true` and getNumber() will return `0`.
*/
constexpr RStringUKey() noexcept;
/**
* Initializes this registered string to one of the static pre-defined registered strings.
* @param staticString The pre-defined registered string to use.
* @param number The number that will be returned by getNumber().
*/
constexpr RStringUKey(eRString staticString, int32_t number = 0) noexcept;
/**
* Finds or registers a new case-insensitive string.
*
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
*
* @param str The string to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
RStringUKey(const char* str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new string with a given number component.
* @param number The number that will be returned by getNumber().
* @param str The string to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
RStringUKey(int32_t number, const char* str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new counted case-insensitive string.
* @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
* characters.
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
* @param str The string to find or register.
* @param len The number of characters of @p str to include.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringUKey(const char* str, size_t len, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new counted case-insensitive string with a given number component.
* @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
* characters.
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
* @param number The number that will be returned by getNumber().
* @param str The string to find or register.
* @param len The number of characters of @p str to include.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringUKey(int32_t number, const char* str, size_t len, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new case-insensitive `std::string`.
* @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
* well.
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
* @param str The `std::string` to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringUKey(const std::string& str, RStringOp op = RStringOp::eRegister);
/**
* Finds or registers a new case-insensitive `std::string` with a number component.
* @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
* well.
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
* @param number The number that will be returned by getNumber().
* @param str The `std::string` to find or register.
* @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
* previously registered, `*this` is initialized as if with the default constructor.
*/
explicit RStringUKey(int32_t number, const std::string& str, RStringOp op = RStringOp::eRegister);
/**
* Appends a number component to a registered string to form a key.
* @param str The registered string to decorate.
* @param number The number that will be returned by getNumber().
*/
RStringUKey(const RStringU& str, int32_t number = 0);
/**
* Converts a registered string key into an "un-cased" (i.e. case-insensitive) registered string key.
* @param other The RStringKey to convert. The number component is maintained.
*/
explicit RStringUKey(const RStringKey& other);
/**
* Returns a copy of this registered string key.
* @note This function exists for compatibility with the RStringKey interface.
* @returns `*this` since this string is already "un-cased" (i.e. case-insensitive). The number component will be
* the same as the number for `*this`.
*/
RStringUKey toUncased() const noexcept;
/**
* Returns a registered string without the number component.
* @returns A registered string that matches `*this` without a number component.
*/
RStringU truncate() const noexcept;
/**
* Equality comparison between this registered string key and another.
*
* @note A case-insensitive compare is performed.
*
* @param other Another registered string.
* @returns `true` if `*this` and `other` represent the same registered string and have matching number components;
* `false` otherwise.
*/
bool operator==(const RStringUKey& other) const noexcept;
/**
* Inequality comparison between this registered string key and another.
*
* @note A case-insensitive compare is performed.
*
* @param other Another registered string.
* @returns `false` if `*this` and `other` represent the same registered string and have matching number components;
* `true` otherwise.
*/
bool operator!=(const RStringUKey& other) const noexcept;
/**
* Checks whether this registered string key is stably (but not lexicographically) ordered before another registered
* string. The number component is also compared and keys with a lower number component will be ordered before.
*
* This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
*
* @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
* between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
* While a lexicographical comparison would be O(n), this comparison is O(1).
*
* @param other Another registered string.
* @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
*/
bool owner_before(const RStringUKey& other) const noexcept;
#ifndef DOXYGEN_BUILD // Sphinx warns about Duplicate C++ declaration
/**
* Returns the hash value as by `carb::hashString(this->truncate().c_str())` combined with the number component.
*
* @note This value is computed once for a registered string and cached, so this operation is generally very fast.
*
* @returns The hash value as computed by `carb::hashString(this->truncate().c_str())`.
*/
size_t getHash() const;
/**
* Returns the hash value as by `carb::hashLowercaseString(this->truncate().c_str())` combined with the number
* component.
*
* @note This value is pre-computed for registered strings and cached, so this operation is always O(1).
*
* @returns The hash value as computed by `carb::hashLowercaseString(this->truncate().c_str())`.
*/
size_t getUncasedHash() const noexcept;
#endif
/**
* Returns a string containing the registered string, and if getNumber() is not zero, the number appended.
*
* Example: RStringUKey(eRString::RS_carb, 1).toString() would produce "carb_1".
* @returns A string containing the registered string. If getNumber() is non-zero, an underscore and the number are
* appended.
*/
std::string toString() const;
/**
* Returns the number component of this key.
* @returns The number component previously specified in the constructor or with setNumber() or via number().
*/
int32_t getNumber() const noexcept;
/**
* Sets the number component of this key.
* @param num The new number component.
*/
void setNumber(int32_t num) noexcept;
/**
* Direct access to the number component for manipulation or atomic operations via `atomic_ref`.
* @returns A reference to the number component.
*/
int32_t& number() noexcept;
private:
// Hide these functions since they are incomplete
using Base::c_str;
using Base::data;
using Base::length;
};
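/* Example (illustrative sketch, not part of the original header): exercising the
 * RStringUKey members documented above. The constructor form and the pre-registered
 * RS_carb string follow the toString() example.
 *
 * @code{.cpp}
 * carb::RStringUKey key(carb::eRString::RS_carb, 1);
 * std::string s = key.toString();        // "carb_1": the string plus '_' and the number
 * key.setNumber(2);                      // the number component is mutable
 * carb::RStringU bare = key.truncate();  // same string, number component dropped
 * @endcode
 */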
// Global operator<< overloads for stream output; found via argument-dependent lookup (ADL)
/**
* Global stream output operator for RString.
* @param o The output stream to write to.
* @param s The registered string to output.
* @returns The output stream, @p o.
*/
template <class CharT, class Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RString& s)
{
o << s.c_str();
return o;
}
/**
* Global stream output operator for RStringU.
* @param o The output stream to write to.
* @param s The registered string to output.
* @returns The output stream, @p o.
*/
template <class CharT, class Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RStringU& s)
{
o << s.c_str();
return o;
}
/**
* Global stream output operator for RStringKey.
* @param o The output stream to write to.
* @param s The registered string to output.
* @returns The output stream, @p o.
*/
template <class CharT, class Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RStringKey& s)
{
o << s.toString();
return o;
}
/**
* Global stream output operator for RStringUKey.
* @param o The output stream to write to.
* @param s The registered string to output.
* @returns The output stream, @p o.
*/
template <class CharT, class Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RStringUKey& s)
{
o << s.toString();
return o;
}
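/* Example (illustrative sketch, not part of the original header): the operator<<
 * overloads above allow registered strings to be streamed directly; the key types
 * print via toString(). Assumes RStringKey offers the (eRString, int32_t)
 * constructor form used in the RStringUKey toString() example.
 *
 * @code{.cpp}
 * carb::RStringKey key(carb::eRString::RS_carb, 1);
 * std::cout << key; // prints "carb_1"
 * @endcode
 */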
} // namespace carb
// Specializations for std::hash and std::owner_less per type
/**
* RString specialization for `std::hash`.
*/
template <>
struct std::hash<::carb::RString>
{
/**
* Returns the hash.
* @param v The registered string.
* @returns The hash as computed by the getHash() function.
*/
size_t operator()(const ::carb::RString& v) const
{
return v.getHash();
}
};
/**
* RString specialization for `std::owner_less`.
*/
template <>
struct std::owner_less<::carb::RString>
{
/**
* Returns true if @p lhs should be ordered-before @p rhs.
* @param lhs A registered string.
* @param rhs A registered string.
* @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
*/
bool operator()(const ::carb::RString& lhs, const ::carb::RString& rhs) const
{
return lhs.owner_before(rhs);
}
};
/**
* RStringU specialization for `std::hash`.
*/
template <>
struct std::hash<::carb::RStringU>
{
/**
* Returns the hash.
* @param v The registered string.
* @returns The hash as computed by the getHash() function.
*/
size_t operator()(const ::carb::RStringU& v) const
{
return v.getHash();
}
};
/**
* RStringU specialization for `std::owner_less`.
*/
template <>
struct std::owner_less<::carb::RStringU>
{
/**
* Returns true if @p lhs should be ordered-before @p rhs.
* @param lhs A registered string.
* @param rhs A registered string.
* @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
*/
bool operator()(const ::carb::RStringU& lhs, const ::carb::RStringU& rhs) const
{
return lhs.owner_before(rhs);
}
};
/**
* RStringKey specialization for `std::hash`.
*/
template <>
struct std::hash<::carb::RStringKey>
{
/**
* Returns the hash.
* @param v The registered string key.
* @returns The hash as computed by the getHash() function.
*/
size_t operator()(const ::carb::RStringKey& v) const
{
return v.getHash();
}
};
/**
* RStringKey specialization for `std::owner_less`.
*/
template <>
struct std::owner_less<::carb::RStringKey>
{
/**
* Returns true if @p lhs should be ordered-before @p rhs.
* @param lhs A registered string.
* @param rhs A registered string.
* @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
*/
bool operator()(const ::carb::RStringKey& lhs, const ::carb::RStringKey& rhs) const
{
return lhs.owner_before(rhs);
}
};
/**
* RStringUKey specialization for `std::hash`.
*/
template <>
struct std::hash<::carb::RStringUKey>
{
/**
* Returns the hash.
* @param v The registered string key.
* @returns The hash as computed by the getHash() function.
*/
size_t operator()(const ::carb::RStringUKey& v) const
{
return v.getHash();
}
};
/**
* RStringUKey specialization for `std::owner_less`.
*/
template <>
struct std::owner_less<::carb::RStringUKey>
{
/**
* Returns true if @p lhs should be ordered-before @p rhs.
* @param lhs A registered string.
* @param rhs A registered string.
* @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
*/
bool operator()(const ::carb::RStringUKey& lhs, const ::carb::RStringUKey& rhs) const
{
return lhs.owner_before(rhs);
}
};
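/* Example (illustrative sketch, not part of the original headers): the std::hash and
 * std::owner_less specializations above let registered strings serve directly as
 * keys in standard containers.
 *
 * @code{.cpp}
 * std::unordered_map<carb::RString, int> counts;  // hashing via RString::getHash()
 * std::map<carb::RStringU, int, std::owner_less<carb::RStringU>> ordered; // O(1) ordering
 * @endcode
 */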
#include "RString.inl"
| 49,729 |
C
| 39.762295 | 120 | 0.683102 |
omniverse-code/kit/include/carb/Memory.h
|
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief DLL Boundary safe memory management functions
#pragma once
#include "Defines.h"
#include "Types.h"
#include "cpp/Bit.h"
#include "detail/DeferredLoad.h"
//! Internal function used by all other allocation functions.
//!
//! This function is the entry point into `carb.dll`/`libcarb.so` for @ref carb::allocate(), @ref carb::deallocate(),
//! and @ref carb::reallocate(). There are four modes to this function:
//! - If @p p is `nullptr` and @p size is `0`, no action is taken and `nullptr` is returned.
//! - If @p p is not `nullptr` and @p size is `0`, the given pointer is deallocated and `nullptr` is returned.
//! - If @p p is `nullptr` and @p size is non-zero, memory of the requested @p size and alignment specified by @p align
//! is allocated and returned. If an allocation error occurs, `nullptr` is returned.
//! - If @p p is not `nullptr` and @p size is non-zero, the memory is reallocated and copied (as if by `std::memcpy`) to
//! the new memory block, which is returned. If @p p can be resized in situ, the same @p p value is returned. If an
//! error occurs, `nullptr` is returned.
//!
//! @note Using this function requires explicitly linking with `carb.dll`/`libcarb.so` if @ref CARB_REQUIRE_LINKED is
//! `1`. Otherwise, the caller must ensure that `carb.dll`/`libcarb.so` is already loaded before calling this function.
//! Use in situations where the Carbonite Framework is already loaded (i.e. plugins) does not require explicitly linking
//! against Carbonite as this function will be found dynamically at runtime.
//!
//! @warning Do not call this function directly. Instead call @ref carb::allocate(), @ref carb::deallocate(), or
//! @ref carb::reallocate()
//!
//! @see carb::allocate() carb::reallocate() carb::deallocate()
//! @param p The pointer to re-allocate or free. May be `nullptr`. See explanation above.
//! @param size The requested size of the memory region in bytes. See explanation above.
//! @param align The requested alignment of the memory region in bytes. Must be a power of two. See explanation above.
//! @returns Allocated memory, or `nullptr` upon deallocation, or `nullptr` on allocation when an error occurs.
#if CARB_REQUIRE_LINKED
CARB_DYNAMICLINK void* carbReallocate(void* p, size_t size, size_t align);
#else
CARB_DYNAMICLINK void* carbReallocate(void* p, size_t size, size_t align) CARB_ATTRIBUTE(weak);
#endif
namespace carb
{
//! \cond DEV
namespace detail
{
CARB_DETAIL_DEFINE_DEFERRED_LOAD(getCarbReallocate, carbReallocate, (void* (*)(void*, size_t, size_t)));
} // namespace detail
//! \endcond
//! Allocates a block of memory.
//!
//! @note Any plugin (or the executable) may @ref allocate the memory and a different plugin (or the executable) may
//! @ref deallocate or @ref reallocate it.
//!
//! @note If carb.dll/libcarb.so is not loaded, this function will always return `nullptr`.
//!
//! @param size The size of the memory block requested, in bytes. Specifying '0' will return a valid pointer that
//! can be passed to @ref deallocate but cannot be used to store any information.
//! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less
//! than `sizeof(size_t)` are ignored. `0` indicates to use default system alignment (typically
//! `2 * sizeof(void*)`).
//! @returns A non-`nullptr` memory block of @p size bytes with minimum alignment @p align. If an error occurred,
//! or memory could not be allocated, `nullptr` is returned. The memory is not initialized.
inline void* allocate(size_t size, size_t align = 0) noexcept
{
if (auto impl = detail::getCarbReallocate())
return impl(nullptr, size, align);
else
return nullptr;
}
//! Deallocates a block of memory previously allocated with @ref allocate().
//!
//! @note Any plugin (or the executable) may @ref allocate the memory and a different plugin (or the executable) may
//! @ref deallocate or @ref reallocate it.
//!
//! @note If carb.dll/libcarb.so is not loaded, this function will silently do nothing. Since @ref allocate would have
//! returned `nullptr` in this case, this function should never be called.
//!
//! @param p The block of memory previously returned from @ref allocate() or @ref reallocate(), or `nullptr`.
inline void deallocate(void* p) noexcept
{
if (p)
{
if (auto impl = detail::getCarbReallocate())
impl(p, 0, 0);
}
}
//! Reallocates a block of memory previously allocated with @ref allocate().
//!
//! This function changes the size of the memory block pointed to by @p p to @p size bytes with @p align alignment.
//! The contents are unchanged from the start of the memory block up to the minimum of the old size and @p size. If
//! @p size is larger than the old size, the added memory is not initialized. If @p p is `nullptr`, the call is
//! equivalent to `allocate(size, align)`; if @p size is `0` and @p p is not `nullptr`, the call is equivalent to
//! `deallocate(p)`. Unless @p p is `nullptr`, it must have been retrieved by an earlier call to @ref allocate() or
//! @ref reallocate(). If the memory region was moved in order to resize it, @p p will be freed as with `deallocate(p)`.
//!
//! @note Any plugin (or the executable) may @ref allocate the memory and a different plugin (or the executable) may
//! @ref deallocate or @ref reallocate it.
//!
//! @note If carb.dll/libcarb.so is not loaded, this function will always return @p p without side-effects.
//!
//! @param p The block of memory previously returned from @ref allocate() or @ref reallocate() if resizing is
//! resizing is desired. If `nullptr` is passed as this parameter, the call behaves as if
//! `allocate(size, align)` was called.
//! @param size The size of the memory block requested, in bytes. See above for further explanation.
//! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less
//! than `sizeof(size_t)` are ignored. Changing the alignment from a previous allocation is undefined behavior.
//! `0` indicates to use default system alignment (typically `2 * sizeof(void*)`).
//! @returns A pointer to a block of memory of @p size bytes with minimum alignment @p align, unless an error
//! occurs in which case `nullptr` is returned. If @p p is `nullptr` and @p size is `0` then `nullptr` is also
//! returned.
inline void* reallocate(void* p, size_t size, size_t align = 0) noexcept
{
if (auto impl = detail::getCarbReallocate())
return impl(p, size, align);
else
return p;
}
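/* Example (illustrative sketch, not part of the original header): a typical
 * allocate/reallocate/deallocate round trip using the functions above; error
 * handling is elided. The memory may be allocated in one module and freed in
 * another.
 *
 * @code{.cpp}
 * void* p = carb::allocate(64); // 64 bytes, default alignment
 * if (p)
 * {
 *     p = carb::reallocate(p, 128); // grow; the first 64 bytes are preserved
 *     carb::deallocate(p);
 * }
 * @endcode
 */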
/**
* A class implementing the 'Allocator' C++ Named Requirement.
*
* This class is usable for C++ classes that require an allocator, such as `std::vector`.
* @note This class requires dynamic or static linking to carb.dll/libcarb.so/libcarb.dylib in order to function.
* @tparam T The type to allocate
* @tparam Align The requested alignment. Must be zero or a power of two. Zero indicates to use `T`'s required
* alignment.
*/
template <class T, size_t Align = 0>
class Allocator
{
public:
using pointer = T*; //!< pointer
using const_pointer = const T*; //!< const_pointer
using reference = T&; //!< reference
using const_reference = const T&; //!< const_reference
using void_pointer = void*; //!< void_pointer
using const_void_pointer = const void*; //!< const_void_pointer
using value_type = T; //!< value_type
using size_type = std::size_t; //!< size_type
using difference_type = std::ptrdiff_t; //!< difference_type
static_assert(!Align || ::carb::cpp::has_single_bit(Align), "Must be a power of two");
constexpr static size_t alignment = Align; //!< Alignment (non-standard)
//! A struct that allows determining an allocator for class `U` through the `other` type.
template <class U>
struct rebind
{
//! The type of `Allocator<U>`
using other = Allocator<U, alignment>;
};
//! Constructor
constexpr Allocator() noexcept = default;
//! Copy constructor
constexpr Allocator(const Allocator&) noexcept = default;
//! Copy-assign operator
constexpr Allocator& operator=(const Allocator&) noexcept = default;
//! Copy constructor
template <class U, size_t UAlign>
constexpr Allocator(const Allocator<U, UAlign>& other) noexcept
{
CARB_UNUSED(other);
}
//! Copy-assign operator
template <class U, size_t UAlign>
constexpr Allocator& operator=(const Allocator<U, UAlign>& other) noexcept
{
CARB_UNUSED(other);
return *this;
}
//! Destructor
~Allocator() = default;
//! Equality operator
constexpr bool operator==(const Allocator& other) const noexcept
{
CARB_UNUSED(other);
return true;
}
//! Inequality operator
constexpr bool operator!=(const Allocator& other) const noexcept
{
CARB_UNUSED(other);
return false;
}
/**
* Allocates suitable storage for an array object of type `T[n]` and creates the array, but does not construct array
* elements.
*
* If \ref alignment is suitable (that is, not less than the required alignment of `T`) it is used, otherwise the
* required alignment of `T` is used.
* @param n The number of elements of `T` to allocate space for.
* @returns A pointer to memory that can contain an array of type `T[n]`, but no array elements have been
* constructed.
*/
pointer allocate(size_type n = 1) noexcept /*strengthened*/
{
auto align = ::carb_max(+alignment, std::alignment_of<T>::value);
return pointer(::carb::allocate(sizeof(T) * n, align));
}
/**
* Same as \ref allocate(size_type) but additionally accepts \p p (`nullptr` or a pointer obtained from
* \ref allocate()) as a locality hint.
* @param n The number of elements of `T` to allocate space for.
* @param p May be `nullptr` or a pointer obtained from \ref allocate(). Used only as a locality hint; it is never
* returned as the new allocation.
* @returns A pointer to memory that can contain an array of type `T[n]`, but no array elements have been
* constructed.
*/
pointer allocate(size_type n, const_void_pointer p) noexcept /*strengthened*/
{
CARB_UNUSED(p); // Returning the hint would alias live storage and violate the C++ 'Allocator' requirements.
return allocate(n);
}
/**
* Deallocates storage pointed to by `p`, which must be a value returned by a previous call to \ref allocate() that
* has not been invalidated by an intervening call to `deallocate`.
* @param p A value returned by a previous call to \ref allocate() and not previously passed to `deallocate`.
* @param n Must be the same size value that was originally passed to \ref allocate().
*/
void deallocate(pointer p, size_type n) noexcept /*strengthened*/
{
CARB_UNUSED(n);
::carb::deallocate(p);
}
/**
* Returns the largest value that can be passed to \ref allocate().
* @returns the largest value that can be passed to \ref allocate().
*/
size_type max_size() const noexcept
{
return size_type(-1);
}
/**
* Constructs an object of type `X` in previously-allocated storage at the address pointed to by `p`, using `args`
* as the constructor arguments.
* @param p The pointer at which to construct.
* @param args The constructor arguments.
*/
template <class X, class... Args>
void construct(X* const p, Args&&... args)
{
::new (const_cast<void*>(static_cast<const volatile void*>(p))) X(std::forward<Args>(args)...);
}
/**
* Destructs an object of type `X` pointed to by `p` but does not deallocate any storage.
* @param p The pointer to an object of type `X` to destroy.
*/
template <class X>
void destroy(X* const p)
{
p->~X();
}
};
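/* Example (illustrative sketch, not part of the original header): using
 * carb::Allocator as the allocator of a standard container so that element storage
 * is obtained through the Carbonite allocation functions.
 *
 * @code{.cpp}
 * std::vector<float, carb::Allocator<float>> v;
 * v.assign({ 1.0f, 2.0f, 3.0f }); // storage obtained via carb::allocate()
 * @endcode
 */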
/**
* An object can inherit from this class in order to use Carbonite allocation functions for creation/deletion.
*/
template <size_t Align = 0>
class UseCarbAllocatorAligned
{
public:
//! The alignment amount used by this allocator
constexpr static size_t alignment = Align;
//! \cond DEV
void* operator new(std::size_t count)
{
return carb::allocate(count, alignment);
}
void* operator new[](std::size_t count)
{
return carb::allocate(count, alignment);
}
void operator delete(void* ptr)
{
carb::deallocate(ptr);
}
void operator delete[](void* ptr)
{
carb::deallocate(ptr);
}
#if CARB_HAS_CPP17
void* operator new(std::size_t count, std::align_val_t al)
{
return carb::allocate(count, ::carb_max(alignment, size_t(al)));
}
void* operator new[](std::size_t count, std::align_val_t al)
{
return carb::allocate(count, ::carb_max(alignment, size_t(al)));
}
void operator delete(void* ptr, std::align_val_t al)
{
CARB_UNUSED(al);
carb::deallocate(ptr);
}
void operator delete[](void* ptr, std::align_val_t al)
{
CARB_UNUSED(al);
carb::deallocate(ptr);
}
#endif
//! \endcond
};
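/* Example (illustrative sketch, not part of the original header): routing new/delete
 * for a type through the Carbonite allocator by inheriting from the helper above.
 * MyObject is a hypothetical type.
 *
 * @code{.cpp}
 * struct MyObject : public carb::UseCarbAllocatorAligned<16>
 * {
 *     float data[4];
 * };
 * MyObject* o = new MyObject; // allocated via carb::allocate() with 16-byte alignment
 * delete o;                   // freed via carb::deallocate()
 * @endcode
 */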
/** Allocated object deleter helper class. This is suitable for use in various STL container
* classes that accept a functor responsible for deleting an object that was allocated using
* an allocation system other than new/delete. This particular implementation ensures the
* object is destructed before deallocating its memory.
*/
template <class T>
class Deleter
{
public:
/** Functor operator to destruct and deallocate an object that was allocated and constructed
* using one of the carb::allocate() family of functions.
*
* @tparam T The data type of the object to delete.
* @param[in] p The object to be destroyed.
*/
void operator()(T* p) noexcept
{
p->~T();
carb::deallocate(p);
}
};
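/* Example (illustrative sketch, not part of the original header): pairing
 * carb::Deleter with std::unique_ptr for an object constructed in storage obtained
 * from carb::allocate().
 *
 * @code{.cpp}
 * void* mem = carb::allocate(sizeof(int), alignof(int));
 * std::unique_ptr<int, carb::Deleter<int>> p{ new (mem) int(42) };
 * // on destruction, p destructs the int and returns the memory via carb::deallocate()
 * @endcode
 */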
/**
* An object can inherit from this class in order to use Carbonite allocation functions for creation/deletion.
*/
using UseCarbAllocator = UseCarbAllocatorAligned<>;
} // namespace carb
| 14,513 |
C
| 39.093923 | 120 | 0.673741 |
omniverse-code/kit/include/carb/ClientUtils.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Utilities for Carbonite clients
#pragma once
#include "Framework.h"
#include "assert/AssertUtils.h"
#include "crashreporter/CrashReporterUtils.h"
#include "l10n/L10nUtils.h"
#include "logging/Log.h"
#include "logging/StandardLogger.h"
#include "profiler/Profile.h"
#include "../omni/core/Omni.h"
#include <vector>
namespace carb
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
inline void registerBuiltinFileSystem(Framework* f)
{
f->registerPlugin(g_carbClientName, f->getBuiltinFileSystemDesc());
}
inline void registerBuiltinLogging(Framework* f)
{
f->registerPlugin(g_carbClientName, f->getBuiltinLoggingDesc());
}
inline void registerBuiltinAssert(Framework* f)
{
f->registerPlugin(g_carbClientName, f->getBuiltinAssertDesc());
}
inline void registerBuiltinThreadUtil(Framework* f)
{
f->registerPlugin(g_carbClientName, f->getBuiltinThreadUtilDesc());
}
inline void registerAtexitHandler()
{
# if CARB_PLATFORM_WINDOWS && !defined _DLL
// Since we're not using the dynamic runtime, we need to notify carb.dll if the executable's atexit() functions run.
// We only do this if this function is compiled into the executable, so check for that.
auto exeHandle = GetModuleHandleW(NULL);
HMODULE myHandle;
if (GetModuleHandleExW(
CARBWIN_GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | CARBWIN_GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
(LPCWSTR)®isterAtexitHandler, &myHandle) &&
myHandle == exeHandle)
{
// Verified that this function is compiled into the executable and without dynamic runtime.
auto carbHandle = GetModuleHandleW(L"carb.dll");
auto proc = (void (*)(void*))(carbHandle ? GetProcAddress(carbHandle, "carbControlAtexit") : nullptr);
if (proc)
{
// Call our undocumented function and pass our atexit() function so that carb.dll can register a callback
// to know when the first-chance (executable) atexit happens.
proc((void*)&atexit);
}
}
# endif
}
} // namespace detail
#endif
/**
* Main acquisition of the Carbonite Framework for Clients (applications and plugins).
*
* \warning It is typically not necessary to call this, since macros such as \ref OMNI_CORE_INIT already ensure that
* this function is called properly.
*
* At a high level, this function:
*
* - Calls \ref carb::acquireFramework() and assigns it to a global variable within this module: \ref g_carbFramework.
* - Calls \ref logging::registerLoggingForClient(), \ref assert::registerAssertForClient(), and
* \ref l10n::registerLocalizationForClient().
* - Calls \ref OMNI_CORE_START().
*
* @param args Arguments passed to \ref OMNI_CORE_START
* @returns A pointer to the Carbonite Framework, if initialization was successful; `nullptr` otherwise.
*/
inline Framework* acquireFrameworkAndRegisterBuiltins(const OmniCoreStartArgs* args = nullptr)
{
// Acquire framework and set into global variable
Framework* framework = acquireFramework(g_carbClientName);
if (framework)
{
g_carbFramework = framework;
static_assert(
kFrameworkVersion.major == 0,
"The framework automatically registers builtins now; the registerXXX functions can be removed once the framework version changes.");
detail::registerAtexitHandler();
// Starting up logging
detail::registerBuiltinLogging(framework);
logging::registerLoggingForClient();
// Starting up filesystem
detail::registerBuiltinFileSystem(framework);
detail::registerBuiltinAssert(framework);
detail::registerBuiltinThreadUtil(framework);
// grab the assertion helper interface.
assert::registerAssertForClient();
// grab the l10n interface.
l10n::registerLocalizationForClient();
// start up ONI
OMNI_CORE_START(args);
}
return framework;
}
/**
* This function releases the Carbonite Framework.
*
* The operations performed are essentially the teardown of \ref acquireFrameworkAndRegisterBuiltins().
*
* At a high-level, this function:
* - Calls \ref logging::deregisterLoggingForClient(), \ref assert::deregisterAssertForClient(), and
* \ref l10n::deregisterLocalizationForClient().
* - Calls \ref omniReleaseStructuredLog().
* - Unloads all Carbonite plugins
* - Calls \ref OMNI_CORE_STOP
* - Calls \ref releaseFramework()
* - Sets \ref g_carbFramework to `nullptr`.
*
* \note It is not necessary to manually call this function if \ref OMNI_CORE_INIT is used, since that macro will ensure
* that the Framework is released.
*/
inline void releaseFrameworkAndDeregisterBuiltins()
{
if (isFrameworkValid())
{
logging::deregisterLoggingForClient();
assert::deregisterAssertForClient();
l10n::deregisterLocalizationForClient();
// Release structured log before unloading plugins
omniReleaseStructuredLog();
g_carbFramework->unloadAllPlugins();
OMNI_CORE_STOP();
releaseFramework();
}
g_carbFramework = nullptr;
}
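/* Example (illustrative sketch, not part of the original header): manual framework
 * lifetime management in an application that does not use OMNI_CORE_INIT. Assumes
 * CARB_GLOBALS (below) was declared in exactly one compilation unit.
 *
 * @code{.cpp}
 * int main()
 * {
 *     carb::Framework* framework = carb::acquireFrameworkAndRegisterBuiltins();
 *     if (!framework)
 *         return 1;
 *     // ... load plugins and acquire interfaces ...
 *     carb::releaseFrameworkAndDeregisterBuiltins();
 *     return 0;
 * }
 * @endcode
 */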
} // namespace carb
/**
* Defines global variables of the framework and built-in plugins.
*
* \note Either this macro, or \ref CARB_GLOBALS_EX or \ref OMNI_APP_GLOBALS must be specified in the global namespace
* in exactly one compilation unit for a Carbonite Application.
*
* @param clientName The name of the client application. Must be unique with respect to any plugins loaded. Also is the
* name of the default log channel.
*/
#define CARB_GLOBALS(clientName) CARB_GLOBALS_EX(clientName, nullptr)
/**
* Defines global variables of the framework and built-in plugins.
*
* \note Either this macro, or \ref CARB_GLOBALS or \ref OMNI_APP_GLOBALS must be specified in the global namespace in
* exactly one compilation unit for a Carbonite Application.
*
* @param clientName The name of the client application. Must be unique with respect to any plugins loaded. Also is the
* name of the default log channel.
* @param clientDescription A description to use for the default log channel.
*/
#define CARB_GLOBALS_EX(clientName, clientDescription) \
CARB_FRAMEWORK_GLOBALS(clientName) \
CARB_LOG_GLOBALS() \
CARB_PROFILER_GLOBALS() \
CARB_ASSERT_GLOBALS() \
CARB_LOCALIZATION_GLOBALS() \
CARB_CRASH_REPORTER_GLOBALS() \
OMNI_GLOBALS_ADD_DEFAULT_CHANNEL(clientName, clientDescription)
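/* Example (illustrative sketch, not part of the original header): declaring the
 * required globals in exactly one compilation unit. The client name is a
 * hypothetical placeholder.
 *
 * @code{.cpp}
 * CARB_GLOBALS("example.app")
 * @endcode
 */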
| 7,507 |
C
| 37.701031 | 144 | 0.660583 |
omniverse-code/kit/include/carb/Strong.h
|
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "Defines.h"
#include "cpp/TypeTraits.h"
#include <typeindex> // for std::hash
#include <ostream> // for std::basic_ostream
/**
* Implements a strong type. `typedef` and `using` declarations do not declare a new type. `typedef int MyType` uses the
* name `MyType` to refer to int; MyType and int are therefore interchangeable.
*
* CARB_STRONGTYPE(MyType, int) differs in that it creates an int-like structure named MyType which is type-safe. MyType
* can be compared to `int` values, but cannot be implicitly assigned from an `int`.
*/
#define CARB_STRONGTYPE(Name, T) using Name = ::carb::Strong<T, struct Name##Sig>
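/* Example (illustrative sketch, not part of the original header): a strong integer
 * type that cannot be implicitly constructed or assigned from a plain int. UserId is
 * a hypothetical name.
 *
 * @code{.cpp}
 * CARB_STRONGTYPE(UserId, int);
 * UserId id{ 42 };        // explicit construction is required
 * bool same = (id == 42); // comparison with the underlying type is allowed
 * int raw = id.get();     // explicit access to the underlying value
 * @endcode
 */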
namespace carb
{
// clang-format off
template<class T, class Sig> class Strong final
{
private:
T val;
public:
using Type = T;
constexpr Strong() : val{} {}
constexpr explicit Strong(T&& val_) : val(std::forward<T>(val_)) {}
constexpr Strong(const Strong& rhs) = default;
Strong& operator=(const Strong& rhs) = default;
constexpr Strong(Strong&& rhs) = default;
Strong& operator=(Strong&& rhs) = default;
const T& get() const { return val; }
T& get() { return val; }
/// Ensure that the underlying type matches the expected type; recommended for use with printf-style arguments
template <class U>
U ensure() const { static_assert(std::is_same<T, U>::value, "Types are not the same"); return val; }
explicit operator bool () const { return !!val; }
bool operator == (const Strong& rhs) const { return val == rhs.val; }
bool operator == (const T& rhs) const { return val == rhs; }
bool operator != (const Strong& rhs) const { return val != rhs.val; }
bool operator != (const T& rhs) const { return val != rhs; }
bool operator < (const Strong& rhs) const { return val < rhs.val; }
void swap(Strong& rhs) noexcept(noexcept(std::swap(val, rhs.val))) { std::swap(val, rhs.val); }
};
// clang-format on
template <class CharT, class Traits, class T, class Sig>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const Strong<T, Sig>& s)
{
o << s.get();
return o;
}
// Swap can be specialized with ADL
template <class T, class Sig, typename = std::enable_if_t<carb::cpp::is_swappable<T>::value, bool>>
void swap(Strong<T, Sig>& lhs, Strong<T, Sig>& rhs) noexcept(noexcept(lhs.swap(rhs)))
{
lhs.swap(rhs);
}
} // namespace carb
// Specialization for std::hash
template <class T, class Sig>
struct std::hash<::carb::Strong<T, Sig>>
{
size_t operator()(const ::carb::Strong<T, Sig>& v) const
{
return ::std::hash<T>{}(v.get());
}
};
| 3,024 |
C
| 35.011904 | 120 | 0.677579 |
omniverse-code/kit/include/carb/ObjectUtils.h
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Helper utilities for Carbonite objects (carb::IObject).
#pragma once
#include "IObject.h"
#include <atomic>
namespace carb
{
/**
* Default handler for carb::IObject reaching zero references, which calls `delete`. Can be specialized for specific
* types.
* @param ptr The object to delete.
*/
template <class T>
void deleteHandler(T* ptr)
{
delete ptr;
}
} // namespace carb
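/* Example (illustrative sketch, not part of the original header): specializing
 * carb::deleteHandler for a type that must not be destroyed with plain `delete`.
 * MyPooledType and returnToPool() are hypothetical.
 *
 * @code{.cpp}
 * namespace carb
 * {
 * template <>
 * void deleteHandler<MyPooledType>(MyPooledType* ptr)
 * {
 *     returnToPool(ptr); // hypothetical pool release instead of calling delete
 * }
 * } // namespace carb
 * @endcode
 */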
/**
* Helper macro to implement default behavior of carb::IObject interface functions IObject::addRef() and
* IObject::release().
*
* Example usage:
* @code
* class Foo : public IObject
* {
* CARB_IOBJECT_IMPL
*
* public:
* ...
* };
* @endcode
*/
#define CARB_IOBJECT_IMPL \
public: \
/** \
* Atomically adds one to the reference count. \
* @returns The current reference count after one was added, though this value may change before read if other \
* threads are also modifying the reference count. The return value is guaranteed to be non-zero. \
*/ \
size_t addRef() override \
{ \
size_t prev = m_refCount.fetch_add(1, std::memory_order_relaxed); \
CARB_ASSERT(prev != 0); /* resurrected item if this occurs */ \
return prev + 1; \
} \
\
/** \
* Atomically subtracts one from the reference count. If the result is zero, carb::deleteHandler() is called for \
* `this`. \
* @returns The current reference count after one was subtracted. If zero is returned, carb::deleteHandler() was \
* called for `this`. \
*/ \
size_t release() override \
{ \
size_t prev = m_refCount.fetch_sub(1, std::memory_order_release); \
CARB_ASSERT(prev != 0); /* double release if this occurs */ \
if (prev == 1) \
{ \
std::atomic_thread_fence(std::memory_order_acquire); \
carb::deleteHandler(this); \
} \
return prev - 1; \
} \
\
private: \
std::atomic_size_t m_refCount{ 1 };
| 5,159 |
C
| 58.999999 | 120 | 0.301997 |
omniverse-code/kit/include/carb/Framework.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Core header for registering and acquiring interfaces.
#pragma once
#include "Defines.h"
#include "Memory.h"
#include "Types.h"
#include <cstddef>
#include <cstdint>
// free() can be #define'd which can interfere below, so handle that here
#ifdef free
# define CARB_FREE_UNDEFINED
# pragma push_macro("free")
# undef free
#endif
namespace carb
{
//! Defines the current major version of the Carbonite framework.
//!
//! Incrementing this variable causes great chaos as it represents a breaking change to users. Increment only with
//! great thought.
#define CARBONITE_MAJOR 0
//! Defines the current minor version of the Carbonite framework.
//!
//! This value is increment when non-breaking changes are made to the framework.
#define CARBONITE_MINOR 6
//! Defines the current version of the Carbonite framework.
constexpr struct Version kFrameworkVersion = { CARBONITE_MAJOR, CARBONITE_MINOR };
//! Four character code used to identify a @ref PluginRegistrationDesc object that is likely to
//! have further data provided in it.
constexpr FourCC kCarb_FourCC = CARB_MAKE_FOURCC('C', 'A', 'R', 'B');
//! Describes the different functions a plugin can define for use by carb::Framework.
//!
//! Populate this struct and register a plugin with carb::Framework::registerPlugin() for static plugins.
//!
//! Dynamic plugins are registered via @ref CARB_PLUGIN_IMPL.
struct PluginRegistrationDesc
{
//! This or @ref onPluginRegisterExFn required. Preferred over @ref onPluginRegisterExFn.
OnPluginRegisterFn onPluginRegisterFn;
OnPluginStartupFn onPluginStartupFn; //!< Can be `nullptr`.
OnPluginShutdownFn onPluginShutdownFn; //!< Can be `nullptr`.
GetPluginDepsFn getPluginDepsFn; //!< Can be `nullptr`.
OnReloadDependencyFn onReloadDependencyFn; //!< Can be `nullptr`.
OnPluginPreStartupFn onPluginPreStartupFn; //!< Can be `nullptr`.
OnPluginPostShutdownFn onPluginPostShutdownFn; //!< Can be `nullptr`.
OnPluginRegisterExFn onPluginRegisterExFn; //!< Can be `nullptr`.
OnPluginStartupExFn onPluginStartupExFn = nullptr; //!< Can be `nullptr`. Preferred over @ref onPluginStartupFn.
OnPluginRegisterEx2Fn onPluginRegisterEx2Fn = nullptr; //!< Can be `nullptr`. Preferred over onPluginRegisterFn and
//!< onPluginRegisterExFn.
//! These members exist to extend PluginRegistrationDesc without changing the framework version, to simplify
//! adoption. Static plugins that use Framework::registerPlugin() but were compiled with an earlier version of this
//! struct that did not have these members will not produce the required bit pattern,
//! thereby instructing the Framework that the subsequent members are not valid and cannot be read.
FourCC const checkValue{ kCarb_FourCC };
//! The size of this object in bytes. This is only valid if the @ref checkValue member is set
//! to @ref kCarb_FourCC. If it is not, this member and other following members will not be
//! accessed in order to avoid undefined behavior.
size_t const sizeofThis{ sizeof(PluginRegistrationDesc) };
OnPluginQuickShutdownFn onPluginQuickShutdownFn = nullptr; //!< Can be `nullptr`. Function that will be called for
//!< the plugin if
//!< \ref carb::quickReleaseFrameworkAndTerminate() is
//!< invoked.
//! Specifies the framework version required by this plugin.
Version frameworkVersion{ kFrameworkVersion };
};
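/* Example (illustrative sketch, not part of the original header): a minimal
 * descriptor for registering a static plugin. The callback names are hypothetical
 * placeholders.
 *
 * @code{.cpp}
 * carb::PluginRegistrationDesc desc{};
 * desc.onPluginRegisterFn = myOnPluginRegister; // required (or onPluginRegisterExFn)
 * desc.onPluginStartupFn = myOnPluginStartup;   // optional; may remain nullptr
 * // framework->registerPlugin(g_carbClientName, desc);
 * @endcode
 */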
//! Describes parameters for finding plugins on disk. Multiple search paths, matching wildcards, and exclusion wildcards
//! can be specified. Used primarily by @ref Framework::loadPlugins.
//!
//! Call @ref PluginLoadingDesc::getDefault() to instantiate this object, as it will correctly set defaults.
struct PluginLoadingDesc
{
//! List of folders in which to search for plugins.
//!
//! This may contain relative or absolute paths. All relative paths will be resolved relative to @ref
//! carb::filesystem::IFileSystem::getAppDirectoryPath(), not the current working directory. Absolute paths in the
//! list will be searched directly. If the search path configuration is invalid (e.g. the search path count is zero),
//! the fallback values are taken from the default plugin descriptor.
//!
//! Defaults to the directory containing the process's executable.
const char* const* searchPaths;
size_t searchPathCount; //!< Number of entries in @ref searchPaths. Defaults to 1.
bool searchRecursive; //!< Whether to search recursively within the search folders. Defaults to `false`.
//! List of Filename wildcards to select loaded files. `*` and `?` can be used, e.g. "carb.*.pl?gin"
//!
//! Defaults to "*.plugin". This can lead to unnecessary plugins being loaded.
const char* const* loadedFileWildcards;
size_t loadedFileWildcardCount; //!< Number of entries in @ref loadedFileWildcards. Defaults to 1.
//! List of filename wildcards to mark loaded files as reloadable. Framework will treat them specially to allow
//! overwriting source plugins and will monitor them for changes.
//!
//! Defaults to `nullptr`.
const char* const* reloadableFileWildcards;
size_t reloadableFileWildcardCount; //!< Number of entries in @ref reloadableFileWildcards. Defaults to 0.
//! If `true`, load and store the plugins interface information, then immediately unload the plugin until needed.
//! When one of plugin's interfaces is acquired, the library will be loaded again.
//!
//! Defaults to `false`.
bool unloadPlugins;
//! List of filename wildcards to select excluded files. `*` and `?` can be used.
//!
//! Defaults to `nullptr`.
const char* const* excludedFileWildcards;
size_t excludedFileWildcardCount; //!< Number of entries in @ref excludedFileWildcards. Defaults to 0.
//! Returns a PluginLoadingDesc with sensible defaults.
static PluginLoadingDesc getDefault()
{
static constexpr const char* defaultSearchPath = "";
static constexpr const char* defaultLoadedFileWildcard = "*.plugin";
return { &defaultSearchPath, 1, false, &defaultLoadedFileWildcard, 1, nullptr, 0, false, nullptr, 0 };
}
};
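/* Example (illustrative sketch, not part of the original header): customizing plugin
 * discovery. The search folder name is a hypothetical placeholder; relative paths
 * resolve against the application directory as documented above.
 *
 * @code{.cpp}
 * carb::PluginLoadingDesc desc = carb::PluginLoadingDesc::getDefault();
 * static const char* kPaths[] = { "plugins" }; // hypothetical folder
 * desc.searchPaths = kPaths;
 * desc.searchPathCount = 1;
 * desc.searchRecursive = true;
 * // framework->loadPlugins(desc);
 * @endcode
 */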
//! Flags for use with \ref carb::AcquireInterfaceOptions
enum AcquireInterfaceFlags : uint64_t
{
//! Default search type, a plugin name may be specified in `typeParam`.
eAIFDefaultType = 0,
//! Acquire interface from interface specified in `typeParam`.
eAIFFromInterfaceType,
//! Acquire interface from library specified in `typeParam`.
eAIFFromLibraryType,
// New types can be added here, before the count.

//! Count of types.
eAIFNumTypes,
//! A mask that contains all of the above types.
fAIFTypeMask = 0xf,
//! The interface acquire is optional and may fail without error logging.
fAIFOptional = (1 << 4),
//! The interface acquire will only succeed if the plugin is already initialized.
fAIFNoInitialize = (1 << 5),
};
static_assert(eAIFNumTypes <= fAIFTypeMask, "Too many types for mask");
//! A structure used with \ref Framework::internalAcquireInterface(). Typically callers should use one of the adapter
//! functions such as \ref Framework::tryAcquireInterface() and not use this directly.
struct AcquireInterfaceOptions
{
//! Size of this structure for versioning.
size_t sizeofThis;
//! The client requesting this interface
const char* clientName;
//! The interface requested
InterfaceDesc desc;
//! Type and flags. One Type must be specified as well as any flags.
AcquireInterfaceFlags flags;
//! Context interpreted based on the type specified in the `flags` member.
const void* typeParam;
};
CARB_ASSERT_INTEROP_SAFE(AcquireInterfaceOptions);
//! Result of loading a plugin. Used by @ref carb::Framework::loadPlugin. Non-negative values indicate success.
enum class LoadPluginResult : int32_t
{
//! Plugin was attempted to be loaded from a temporary path in use by the framework.
eForbiddenPath = -3,
//! Invalid argument passed to @ref Framework::loadPlugin.
eInvalidArg = -2,
//! An unspecified error occurred. The plugin was not loaded.
eFailed = -1,
//! The plugin was successfully loaded.
eSucceeded = 0,
//! The plugin was loaded as an ONI plugin.
eSucceededAsOmniverseNativeInterface = 1,
//! The plugin is already loaded.
eAlreadyLoaded = 2,
};
//! Release Hook function
//!
//! Called when the @ref carb::Framework (or an interface) is being released, before the actual release is done. Add a
//! release hook with @ref carb::Framework::addReleaseHook(). Registered release hooks can be removed with @ref
//! carb::Framework::removeReleaseHook().
//!
//! @param iface The interface that is being released. If the framework is being released, this is `nullptr`.
//!
//! @param userData The data passed to @ref carb::Framework::addReleaseHook().
using ReleaseHookFn = void (*)(void* iface, void* userData);
//! Load Hook function
//!
//! Called when a plugin is loaded for the first time and the requested interface becomes available. The interface must
//! be acquired with \ref Framework::tryAcquireInterface() or \ref Framework::acquireInterface() etc.
//!
//! The thread that first acquires the interface will call all load hooks for that interface before the interface value
//! is returned from the Framework. All other threads that acquire that interface will wait until load hooks have been
//! called. Calling load hooks is done without an internal Framework mutex locked, so other threads are able to acquire
//! other interfaces while load hooks are executing.
//!
//! It is safe for a `LoadHookFn` to call \ref Framework::removeLoadHook() for the handle that caused it to be called,
//! or any other handle.
//!
//! @see Framework::addLoadHook() Framework::removeLoadHook()
//! @param plugin The \ref PluginDesc for the plugin that has now loaded.
//! @param userData The `void*` that was passed to \ref Framework::addLoadHook().
using LoadHookFn = void (*)(const PluginDesc& plugin, void* userData);
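/* Example (illustrative sketch, not part of the original header): a callback matching
 * the LoadHookFn signature above. The function name is a hypothetical placeholder.
 *
 * @code{.cpp}
 * void onMyPluginLoaded(const carb::PluginDesc& plugin, void* userData)
 * {
 *     CARB_UNUSED(plugin);
 *     CARB_UNUSED(userData);
 *     // react to the plugin (and its interface) becoming available
 * }
 * @endcode
 */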
//! Acquire the Carbonite framework for an application.
//!
//! Do not call this method directly. Rather, call a helper function such as @ref OMNI_CORE_INIT, @ref
//! carb::acquireFrameworkAndRegisterBuiltins or @ref carb::acquireFrameworkForBindings. Of the methods above, @ref
//! OMNI_CORE_INIT is preferred for most applications.
//!
//! The Carbonite framework is a singleton object; it will be created on the first acquire call. Subsequent calls to
//! acquire return the same instance.
//!
//! This function is expected to be used by applications, which link with the framework.
//!
//! Plugins should not use this function. Rather, plugins should use @ref carb::getFramework().
//!
//! @thread_safety This function may be called from multiple threads simultaneously.
//!
//! @param appName The application name requesting the framework. Must not be `nullptr`.
//!
//! @param frameworkVersion Specifies the minimum framework version expected by the application. `nullptr` is returned
//! if the minimum version cannot be met.
//!
//! @return The Carbonite framework. Can be `nullptr`.
//!
//! @see @ref carb::releaseFramework().
CARB_DYNAMICLINK carb::Framework* acquireFramework(const char* appName, Version frameworkVersion = kFrameworkVersion);
//! Returns `true` if the Carbonite framework has been created and is still alive. Creation happens at the first @ref
//! carb::acquireFramework() call and ends at any @ref carb::releaseFramework() call.
CARB_DYNAMICLINK bool isFrameworkValid();
//! Retrieves the Carbonite SDK version string.
//!
//! @returns A string describing the current Carbonite SDK version. This will be the same value
//! as the @ref CARB_SDK_VERSION value that was set when the SDK was built.
//!
//! @note This version is intended for use in host apps that link directly to the `carb` library.
//! Libraries that don't link directly to it such as plugins will not be able to call
//! into this without first dynamically importing it. Plugins should instead call this
//! through `carb::getFramework()->getSdkVersion()`.
CARB_DYNAMICLINK const char* carbGetSdkVersion();
//! Tests whether the Carbonite SDK headers match the version of used to build the framework.
//!
//! @param[in] version The version string to compare to the version stored in the Carbonite
//! framework library. This is expected to be the value of the
//! @ref CARB_SDK_VERSION symbol found in `carb/SdkVersion.h`.
//! @returns `true` if the version of the headers matches the version of the framework library
//! that is currently loaded. Returns `false` if the version string in the headers
//! does not match the version of the framework library. If the library does not
//! match the headers, it is not necessarily a fatal problem. It does however
//! indicate that issues may occur and that there may have been a building or
//! packaging problem for the host app.
#define CARB_IS_SAME_SDK_VERSION(version) (strcmp(version, carbGetSdkVersion()) == 0)
//! Releases the Carbonite framework immediately.
//!
//! In some cases, more than one client can acquire the framework (e.g. scripting bindings), but only one of the clients
//! should be responsible for releasing it.
//!
//! @thread_safety May be called from any thread.
CARB_DYNAMICLINK void releaseFramework();
//! Releases the Carbonite framework immediately and exits the process, without running C/C++ atexit() registered
//! functions or static destructors.
//!
//! @note This function does not return.
//!
//! @warning This function must not be called from within a DLL, shared object, or plugin.
//!
//! This function performs the following sequence:
//! 1. Calls any exported \ref carbOnPluginQuickShutdown on all loaded plugins, if the framework is acquired. No plugins
//! are unloaded, unregistered, nor have their interfaces destroyed.
//! 2. Calls any registered Framework release hooks (see \ref carb::Framework::addReleaseHook) in reverse order of
//! registration, if the framework is acquired.
//! 3. Flushes stdout/stderr.
//! 4. Calls `TerminateProcess()` on Windows or `_exit()` on Linux and MacOS.
//!
//! @thread_safety May be called from any thread.
//! @param exitCode The exit code that the process will exit with.
CARB_DYNAMICLINK void quickReleaseFrameworkAndTerminate [[noreturn]] (int exitCode);
#if CARB_PLATFORM_WINDOWS
//! Signal handler for SIGABRT for use with plugins that are statically linked to the CRT.
//!
//! @param[in] signal The signal that occurred. This will be SIGABRT.
//! @returns No return value.
//!
//! @remarks This acts as a signal handler for SIGABRT signals. This is installed during
//! plugin initialization. This should _never_ be called directly since it will
//! result in the process aborting immediately.
CARB_DYNAMICLINK void carbSignalHandler(int signal);
#endif
//! Defines the framework for creating Carbonite applications and plugins.
//!
//! See \carb_framework_overview for high-level documentation on core concepts, using @ref Framework, and creating
//! plugins.
//!
//! Plugins are shared libraries with a .plugin.dll/.so suffix. Plugins are named with the .plugin suffix to support
//! plugin discovery and to coexist with other supporting .dll/.so libraries in the same folder. This is a
//! recommended naming pattern, but not mandatory.
//!
//! Plugin library file format:
//!
//! - Windows: <plugin-name>.plugin.dll
//! - Linux: lib<plugin-name>.plugin.so
//!
//! A plugin implements one or many interfaces and has a name which uniquely identifies it to the framework. The
//! plugin's name usually matches the filename, but it is not mandatory; the actual plugin name is provided by the
//! plugin via @ref carb::OnPluginRegisterFn.
//!
//! "Static" plugin can also be registered with @ref Framework::registerPlugin() function, thus no shared library will
//! be involved.
//!
//! @ref Framework comes with 3 static plugins:
//!
//! - @ref carb::logging::ILogging
//! - @ref carb::filesystem::IFileSystem
//! - @ref carb::assert::IAssert
//!
//! These plugins are used by @ref Framework itself. Without @ref carb::logging::ILogging, @ref Framework won't be able
//! to log messages. Without @ref carb::filesystem::IFileSystem, @ref Framework won't be able to load any "dynamic"
//! plugins. Without @ref carb::assert::IAssert, assertion failures will simply write a message to stderr and abort.
//!
//! It's up to the application to register these needed plugins. @ref OMNI_CORE_INIT() performs this registration on
//! the user's behalf.
//!
//! The term "client" is often used across the @ref Framework API. Client is either:
//!
//! - A plugin. Here the client name is the same as the plugin name.
//!
//! - An application. The module which dynamically links with the Framework and uses @ref carb::acquireFramework().
//!
//! - Scripting bindings. This is technically similar to an application, in that it dynamically links with the @ref
//! Framework and uses @ref carb::acquireFramework().
//!
//! Clients are uniquely identified by their name. Many functions accept client name as an argument. This allows @ref
//! Framework to create a dependency tree of clients. This dependency tree allows the safe unloading of plugins.
//!
//! @thread_safety Unless otherwise noted, @ref Framework functions are thread-safe and may be called from multiple
//! threads simultaneously.
struct Framework
{
/**
* Load and register plugins from shared libraries.
*/
void loadPlugins(const PluginLoadingDesc& desc = PluginLoadingDesc::getDefault());
/**
* Load and register plugins from shared libraries. Prefer using @ref loadPlugins.
*/
void(CARB_ABI* loadPluginsEx)(const PluginLoadingDesc& desc);
/**
* Unloads all plugins, including registered "static" plugins (see @ref Framework::registerPlugin).
*/
void(CARB_ABI* unloadAllPlugins)();
/**
* Acquires the typed plugin interface, optionally from a specified plugin.
*
* If `nullptr` is passed as @p pluginName this method selects the default plugin for the given interface type.
* Default plugin selection happens on the first such acquire call for a particular interface name and is locked until
* after this interface is released. By default the interface with the highest version is selected.
*
* If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this
* call.
*
* @ref Framework::setDefaultPlugin can be used to explicitly set which plugin to set as default, but it should be
* called before the first acquire call.
*
* If acquire fails, `nullptr` is returned and an error is logged.
*
* @param pluginName The option to specify a plugin (implementation) that you specifically want. Pass `nullptr` to
* search for all plugins.
*
* @return The requested plugin interface or `nullptr` if an error occurs (an error message is logged).
*
* @see See @ref tryAcquireInterface(const char*) for a version of this method that does not log errors.
*/
template <typename T>
T* acquireInterface(const char* pluginName = nullptr);
/**
* Tries to acquire the typed plugin interface, optionally from a specified plugin.
*
* If `nullptr` is passed as @p pluginName this method selects the default plugin for the given interface type.
* Default plugin selection happens on the first such acquire call for a particular interface name and is locked until
* after this interface is released. By default the interface with the highest version is selected.
*
* If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this
* call.
*
* @ref Framework::setDefaultPlugin can be used to explicitly set which plugin to set as default, but it should be
* called before the first acquire call.
*
* @param pluginName The option to specify a plugin (implementation) that you specifically want. Pass `nullptr` to
* search for all plugins.
*
* @return The requested plugin interface or `nullptr` if an error occurs.
*/
template <typename T>
T* tryAcquireInterface(const char* pluginName = nullptr);
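    /* Example (illustrative sketch, not part of the original header): optional
     * acquisition without error logging. IFoo is a hypothetical interface type.
     *
     * @code{.cpp}
     * if (auto* foo = framework->tryAcquireInterface<IFoo>())
     * {
     *     // use foo; nothing is logged if no plugin provides IFoo
     * }
     * @endcode
     */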
/**
* Acquires the typed plugin interface from the same plugin as the provided interface.
*
* Example:
*
* @code{.cpp}
* Foo* foo = framework->acquireInterface<Foo>();
*
* // the returned 'bar' interface is from the same plugin as 'foo'.
* Bar* bar = framework->acquireInterface<Bar>(foo);
* @endcode
*
* If foo and bar are not `nullptr`, they are guaranteed to come from the same plugin.
*
* @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a
*                        plugin with the requested interface.
*
* @return The typed plugin interface that is returned and will be started, or `nullptr` if the interface cannot be
* acquired (an error is logged).
*
* @see See @ref tryAcquireInterface(const void*) for a version of this method that does not log errors.
*/
template <typename T>
T* acquireInterface(const void* pluginInterface);
/**
* Tries to acquire the typed plugin interface from the same plugin as the provided interface.
*
* Example:
*
* @code{.cpp}
* Foo* foo = framework->acquireInterface<Foo>();
*
* // the returned 'bar' interface is from the same plugin as 'foo'.
* Bar* bar = framework->tryAcquireInterface<Bar>(foo);
* @endcode
*
* If foo and bar are not `nullptr`, they are guaranteed to come from the same plugin.
*
* @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a
*                        plugin with the requested interface.
*
* @return The typed plugin interface that is returned and will be started, or `nullptr` if the interface cannot be
* acquired.
*/
template <typename T>
T* tryAcquireInterface(const void* pluginInterface);
/**
* Acquires the typed plugin interface from the given dynamic library file.
*
* @note If the given library was not a registered plugin, the Framework will attempt to register the library as a
* new plugin.
*
* If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this
* call.
*
* @param libraryPath The library path to acquire the interface from. Can be absolute or relative (to the current
* working directory) path to a dynamic (.dll/.so/.dylib) library Carbonite plugin.
*
* @return The typed plugin interface (guaranteed to be from the given library) or `nullptr`. If `nullptr` is
* returned, an error is logged.
*
* @see See @ref tryAcquireInterfaceFromLibrary(const char*) for a version of this method that does not log errors.
*/
template <typename T>
T* acquireInterfaceFromLibrary(const char* libraryPath);
/**
* Tries to acquire the typed plugin interface from the given dynamic library file.
*
* @note If the given library was not a registered plugin, the Framework will attempt to register the library as a
* new plugin.
*
* If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this
* call.
*
* This function works exactly as @ref Framework::acquireInterfaceFromLibrary(const char*), except if acquire fails
* it returns `nullptr` and doesn't log an error.
*
* @param libraryPath The library path to acquire the interface from. Can be absolute or relative (to the current
* working directory) path to a dynamic (.dll/.so/.dylib) library Carbonite plugin.
*
* @return The typed plugin interface or `nullptr` if the library file was not found or an error occurred.
*/
template <typename T>
T* tryAcquireInterfaceFromLibrary(const char* libraryPath);
/**
* Tries to acquire the typed plugin interface if and only if it has been previously acquired, optionally from a
* specified plugin.
*
* If `nullptr` is passed as @p pluginName this method selects the default plugin for the given interface type.
* Default plugin selection happens on the first such acquire call for a particular interface name and is locked until
* after this interface is released. By default the interface with the highest version is selected.
*
* Unlike \ref tryAcquireInterface, this function will only acquire an interface if the plugin providing it is
* already started (it won't attempt to start the plugin). This is useful during \ref carbOnPluginShutdown when a
* circularly-dependent interface may have already been released by the Framework and attempting to reload it would
* result in an error.
*
* @ref Framework::setDefaultPlugin can be used to explicitly set which plugin to set as default, but it should be
* called before the first acquire call.
*
* @param pluginName The option to specify a plugin (implementation) that you specifically want. Pass `nullptr` to
* search for all plugins.
*
* @return The requested plugin interface or `nullptr` if an error occurs or the plugin is not started.
*/
template <typename T>
T* tryAcquireExistingInterface(const char* pluginName = nullptr);
/**
* Gets the number of plugins with the specified interface.
*
* @return The number of plugins with the specified interface.
*/
template <typename T>
uint32_t getInterfacesCount();
//! Acquires all interfaces of the given type.
//!
//! The given output array must be preallocated. @p interfacesSize tells this method the size of the array.
//!
//! If @p interfaces is too small, the array is filled as much as possible and an error is logged.
//!
//! If @p interfaces is too big, entries past the required size will not be written.
//!
//! Upon output, `nullptr` entries may appear anywhere in `interfaces`. These represent failed internal calls to @ref
//! tryAcquireInterface. No error is logged in this case.
//!
//! @param interfaces Preallocated array that will hold the acquired interfaces. Values in this array must be
//! preset to `nullptr` in order to determine which entries in the array are valid upon output.
//!
//! @param interfacesSize Number of preallocated array elements. See @ref Framework::getInterfacesCount().
//!
//! @rst
//! .. warning::
//!     Carefully read this method's documentation, as it has a slew of design issues. Its use is not
//! recommended.
//! @endrst
template <typename T>
void acquireInterfaces(T** interfaces, uint32_t interfacesSize);
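    /* Example (illustrative sketch, not part of the original header): acquiring every
     * implementation of a hypothetical interface IFoo, following the preallocation
     * contract described above.
     *
     * @code{.cpp}
     * uint32_t count = framework->getInterfacesCount<IFoo>();
     * std::vector<IFoo*> all(count, nullptr); // entries must be preset to nullptr
     * framework->acquireInterfaces(all.data(), count);
     * @endcode
     */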
//! Acquires the plugin interface pointer from an interface description.
//!
//! This is an internal function. Use @ref Framework::acquireInterface(const char*) instead.
//!
//! @rst
//! .. deprecated:: 135.0
//! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead.
//! However, note that this function will only be available beginning with Carbonite 135.0.
//! @endrst
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description
//!
//! @param pluginName The plugin that you specifically want. If `nullptr`, the interface's "default" plugin is
//! used.
//!
//! @return The returned function pointer for the interface being queried and started. If `nullptr` is returned, an
//! error is logged.
//!
//! @see See @ref tryAcquireInterfaceWithClient for a version of this method that does not log errors.
void*(CARB_ABI* acquireInterfaceWithClient)(const char* clientName, InterfaceDesc desc, const char* pluginName);
static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version");
//! Tries to acquire the plugin interface pointer from an interface description.
//!
//! This method has the same contract as @ref Framework::acquireInterfaceWithClient except an error is not logged if
//! the interface could not be acquired.
//!
//! This is an internal function. Use @ref Framework::tryAcquireInterface(const char*) instead.
//!
//! @rst
//! .. deprecated:: 135.0
//! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead.
//! However, note that this function will only be available beginning with Carbonite 135.0.
//! @endrst
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description
//!
//! @param pluginName The plugin that you specifically want. If `nullptr`, the interface's "default" plugin is
//! used.
//!
//! @return The returned function pointer for the interface being queried and started, or `nullptr` if an error
//! occurs.
void*(CARB_ABI* tryAcquireInterfaceWithClient)(const char* clientName, InterfaceDesc desc, const char* pluginName);
static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version");
//! Acquires the typed plugin interface from the same plugin as the provided interface.
//!
//! This is an internal function. Use @ref Framework::acquireInterface(const char*) instead.
//!
//! @rst
//! .. deprecated:: 135.0
//! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead.
//! However, note that this function will only be available beginning with Carbonite 135.0.
//! @endrst
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description.
//!
//! @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a plugin
//! with requested interface.
//!
//! @return The returned function pointer for the interface being queried and started. If `nullptr` is returned, an
//! error is logged.
//!
//! @see See @ref tryAcquireInterfaceFromInterfaceWithClient for a version of this method that does not log errors.
void*(CARB_ABI* acquireInterfaceFromInterfaceWithClient)(const char* clientName,
InterfaceDesc desc,
const void* pluginInterface);
static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version");
//! Tries to acquire the typed plugin interface from the same plugin as the provided interface.
//!
//! This method has the same contract as @ref Framework::acquireInterfaceFromInterfaceWithClient except an error is
//! not logged if the interface could not be acquired.
//!
//! This is an internal function. Use @ref Framework::tryAcquireInterface(const char*) instead.
//!
//! @rst
//! .. deprecated:: 135.0
//! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead.
//! However, note that this function will only be available beginning with Carbonite 135.0.
//! @endrst
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description.
//!
//! @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a plugin
//! with requested interface.
//!
//! @return The returned function pointer for the interface being queried and started, or `nullptr` if an error
//! occurs.
void*(CARB_ABI* tryAcquireInterfaceFromInterfaceWithClient)(const char* clientName,
InterfaceDesc desc,
const void* pluginInterface);
static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version");
//! Acquires the plugin interface pointer from an interface description and a filename.
//!
//! @note If the given library was not a registered plugin, the Framework will attempt to register the library as a
//! new plugin.
//!
//! This is an internal function. Use @ref Framework::acquireInterfaceFromLibrary(const char*) instead.
//!
//! @rst
//! .. deprecated:: 135.0
//! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead.
//! However, note that this function will only be available beginning with Carbonite 135.0.
//! @endrst
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description
//!
//! @param libraryPath The filename to acquire the interface from. Can be absolute or relative path to actual
//! .dll/.so Carbonite plugin. Path is relative to the current working directory. Must not be `nullptr`.
//!
//! @return The returned function pointer for the interface being queried and started. If `nullptr` is returned, an
//! error is logged.
//!
//! @see See @ref tryAcquireInterfaceFromLibraryWithClient for a version of this method that does not log errors.
void*(CARB_ABI* acquireInterfaceFromLibraryWithClient)(const char* clientName,
InterfaceDesc desc,
const char* libraryPath);
static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version");
//! Tries to acquire the plugin interface pointer from an interface description and a filename.
//!
//! This method has the same contract as @ref Framework::acquireInterfaceFromLibraryWithClient except an error is
//! not logged if the interface could not be acquired.
//!
//! @note If the given library was not a registered plugin, the Framework will attempt to register the library as a
//! new plugin.
//!
//! This is an internal function. Use @ref Framework::tryAcquireInterfaceFromLibrary(const char*) instead.
//!
//! @rst
//! .. deprecated:: 135.0
//! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead.
//! However, note that this function will only be available beginning with Carbonite 135.0.
//! @endrst
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description
//!
//! @param libraryPath The filename to acquire the interface from. Can be absolute or relative path to actual
//! .dll/.so Carbonite plugin. Path is relative to the current working directory. Must not be `nullptr`.
//!
//! @return The returned function pointer for the interface being queried and started, or `nullptr` on error.
void*(CARB_ABI* tryAcquireInterfaceFromLibraryWithClient)(const char* clientName,
InterfaceDesc desc,
const char* libraryPath);
static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version");
//! Gets the number of plugins with the specified interface descriptor.
//!
//! @param interfaceDesc The interface descriptor to get the plugin count.
//!
//! @return The number of plugins with the specified interface descriptor.
uint32_t(CARB_ABI* getInterfacesCountEx)(InterfaceDesc interfaceDesc);
//! Acquires all interfaces of the given type.
//!
//! The given output array must be preallocated. @p interfacesSize tells this method the size of the array.
//!
//! If @p interfaces is too small, the array is filled as much as possible and an error is logged.
//!
//! If @p interfaces is too big, entries past the required size will not be written.
//!
//! Upon output, `nullptr` may randomly appear in `interfaces`. This represents failed internal calls to @ref
//! tryAcquireInterface. No error is logged in this case.
//!
//! This is an internal function. Use @ref Framework::acquireInterfaces() instead.
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description
//!
//! @param interfaces Preallocated array that will hold the acquired interfaces. Values in this array must be
//! preset to `nullptr` in order to determine which entries in the array are valid upon output.
//!
//! @param interfacesSize Number of preallocated array elements. See @ref Framework::getInterfacesCount().
//!
//! @rst
//! .. warning::
//!     Carefully read this method's documentation, as it has a slew of design issues. Its use is not
//!     recommended.
//! @endrst
void(CARB_ABI* acquireInterfacesWithClient)(const char* clientName,
InterfaceDesc interfaceDesc,
void** interfaces,
uint32_t interfacesSize);
//! Releases the use of an interface that is no longer needed.
//!
//! Correct plugin interface type is expected, compile-time check is performed.
//!
//! @param pluginInterface The interface that was returned from acquireInterface
template <typename T>
void releaseInterface(T* pluginInterface);
//! \cond DEV
//! Releases the use of an interface that is no longer needed.
//!
//! This is an internal function. Use @ref Framework::releaseInterface() instead.
//!
//! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param pluginInterface The interface that was returned from @ref Framework::acquireInterface.
void(CARB_ABI* releaseInterfaceWithClient)(const char* clientName, void* pluginInterface);
//! \endcond
//! Gets the plugin descriptor for a specified plugin.
//!
//! @param pluginName The plugin that you specifically want to get the descriptor for. Must not be `nullptr`.
//!
//! @return The @ref PluginDesc; it will be filled with zeros if the plugin doesn't exist. The returned memory will
//! be valid as long as the plugin is loaded.
const PluginDesc&(CARB_ABI* getPluginDesc)(const char* pluginName);
//! Gets the plugin descriptor for an interface returned from @ref Framework::acquireInterface.
//!
//! @param pluginInterface The interface that was returned from acquireInterface
//!
//! @return The @ref PluginDesc; it will be filled with zeros if an invalid interface pointer is provided.
const PluginDesc&(CARB_ABI* getInterfacePluginDesc)(void* pluginInterface);
//! Gets the plugins with the specified interface descriptor.
//!
//! @param interfaceDesc The interface descriptor to get the plugins for.
//!
//! @param outPlugins The array to be populated with the plugins of size @ref Framework::getInterfacesCount().
//! This array must be set to all zeros before being given to this function in order to be able to tell the number of
//! entries written.
//!
//! @rst
//! .. danger::
//!
//!     Do not use this method. The caller will be unable to correctly size ``outPlugins``. The number of loaded
//!     plugins matching ``interfaceDesc`` may change between the call to
//! :cpp:func:`carb::Framework::getInterfacesCount` and this method.
//! @endrst
void(CARB_ABI* getCompatiblePlugins)(InterfaceDesc interfaceDesc, PluginDesc* outPlugins);
//! Gets the number of registered plugins.
//!
//! @return The number of registered plugins.
size_t(CARB_ABI* getPluginCount)();
//! Gets all registered plugins.
//!
//! @param outPlugins The array to be populated with plugin descriptors of size @ref Framework::getPluginCount().
//!
//! @rst
//! .. danger::
//!
//! Do not use this method. The caller will be unable to correctly size ``outPlugins``. The number of plugins
//! may change between the call to :cpp:member:`carb::Framework::getPluginCount` and this method.
//! @endrst
void(CARB_ABI* getPlugins)(PluginDesc* outPlugins);
//! Attempts to reload all plugins that are currently loaded.
void(CARB_ABI* tryReloadPlugins)();
//! Register a "static" plugin.
//!
//! While typical plugins are "dynamic" and loaded from shared libraries (see @ref Framework::loadPlugins), a
//! "static" plugin can be added by calling this function from an application or another plugin. The contract is
//! exactly the same: you provide a set of functions (some of which are optional), which usually are looked for in a
//! shared library by the framework. It can be useful in some special scenarios where you want to hijack particular
//! interfaces or are limited in your ability to produce new shared libraries.
//!
//! It is important that the plugin name provided by the @ref PluginRegistrationDesc::onPluginRegisterFn function is
//! unique; registration will fail otherwise.
//!
//! @param clientName The client registering the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin registration description.
//!
//! @return If registration was successful.
bool(CARB_ABI* registerPlugin)(const char* clientName, const PluginRegistrationDesc& desc);
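// Example (an illustrative sketch; the descriptor's callbacks are elided, and `desc` must supply a
// unique plugin name via its onPluginRegisterFn):
//
//     carb::PluginRegistrationDesc desc{ /* registration callbacks, some optional */ };
//     bool ok = carb::getFramework()->registerPlugin(g_carbClientName, desc);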
//! Try to unregister a plugin.
//!
//! If the plugin is in use, which means one of its interfaces was acquired by someone and not yet released,
//! unregistration will fail. Both "dynamic" (shared libraries) and "static" (see @ref Framework::registerPlugin)
//! plugins can be unregistered.
//!
//! @param pluginName The plugin to be unregistered.
//!
//! @return If unregistration was successful.
bool(CARB_ABI* unregisterPlugin)(const char* pluginName);
//! The descriptor for registering builtin @ref carb::logging::ILogging interface implementation.
const PluginRegistrationDesc&(CARB_ABI* getBuiltinLoggingDesc)();
//! The descriptor for registering builtin @ref carb::filesystem::IFileSystem interface implementation.
const PluginRegistrationDesc&(CARB_ABI* getBuiltinFileSystemDesc)();
//! Sets the default plugin to be used when an interface type is acquired.
//!
//! The mechanism of default interfaces allows @ref Framework to guarantee that every call to
//! `acquireInterface<Foo>()` will return the same `Foo` interface pointer for everyone. The only way to bypass it
//! is by explicitly passing the `pluginName` of the interface you want to acquire.
//!
//! It is important to note that if the interface was previously already acquired, the effect of this function won't
//! take place until it is released by all holders. So it is recommended to set defaults as early as possible.
//!
//! @tparam T The interface type.
//! @param pluginName The name of the plugin (e.g. "carb.profiler-cpu.plugin") that will be set as default. Must not
//! be `nullptr`.
template <class T>
void setDefaultPlugin(const char* pluginName);
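// Example (an illustrative sketch; the plugin name is the one used as an example in the parameter
// docs above, and carb::profiler::IProfiler stands in for any interface type):
//
//     // Must run before the first acquireInterface<carb::profiler::IProfiler>() anywhere in the process.
//     carb::getFramework()->setDefaultPlugin<carb::profiler::IProfiler>("carb.profiler-cpu.plugin");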
//! \cond DEV
//! Sets the default plugin to be used when the given interface is acquired.
//!
//! The mechanism of default interfaces allows @ref Framework to guarantee that every call to
//! `acquireInterface<Foo>()` will return the same `Foo` interface pointer for everyone. The only way to bypass it
//! is by explicitly passing the `pluginName` of the interface you want to acquire.
//!
//! It is important to note that if the interface was previously already acquired, the effect of this function won't
//! take place until it is released by all holders. So it is recommended to set defaults as early as possible.
//!
//! @param clientName The client registering the plugin. This is used to form a dependency graph between clients.
//! Must not be `nullptr`.
//!
//! @param desc The plugin interface description.
//!
//! @param pluginName The plugin that will be set as default. Must not be `nullptr`.
void(CARB_ABI* setDefaultPluginEx)(const char* clientName, InterfaceDesc desc, const char* pluginName);
//! \endcond
//! Sets the temporary path where the framework will store data for reloadable plugins.
//!
//! This function must be called before loading any reloadable plugins. By default @ref Framework creates a
//! temporary folder in the executable's folder.
//!
//! @param tempPath Temporary folder path.
void(CARB_ABI* setReloadableTempPath)(const char* tempPath);
//! Returns temporary path where the framework will store data for reloadable plugins.
//!
//! @return Temporary path for reloadable data. The returned memory is valid until the @ref
//! Framework::setReloadableTempPath is called or the @ref Framework is destroyed.
const char*(CARB_ABI* getReloadableTempPath)();
//! Returns Carbonite version and build information.
//!
//! The format is: `v{major}.{minor} [{shortgithash} {gitbranch} {isdirty}]` where:
//!
//! - major - `kFrameworkVersion.major`
//! - minor - `kFrameworkVersion.minor`
//! - shortgithash - output of `git rev-parse --short HEAD`
//! - gitbranch - output of `git rev-parse --abbrev-ref HEAD`
//! - isdirty - `DIRTY` if `git status --porcelain` is not empty
//!
//! Examples:
//!
//! - `v1.0 [56ab220c master]`
//! - `v0.2 [f2fc1ba1 dev/mfornander/harden DIRTY]`
const char*(CARB_ABI* getBuildInfo)();
//! Checks if the provided plugin interface matches the requirements.
//!
//! @param interfaceCandidate The interface that was provided by the user.
//!
//! @return If the interface candidate matches template interface requirements, returns @p interfaceCandidate.
//! Otherwise, returns `nullptr`.
template <typename T>
T* verifyInterface(T* interfaceCandidate);
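// Example (an illustrative sketch; `MyInterface` is hypothetical):
//
//     MyInterface* candidate = /* obtained from code with unknown version requirements */;
//     if (carb::getFramework()->verifyInterface<MyInterface>(candidate))
//     {
//         // `candidate` is compatible with this client's MyInterface requirements.
//     }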
//! Checks if provided plugin interface matches the requirements.
//!
//! Do not directly use this method. Instead, use @ref Framework::verifyInterface.
//!
//! @param desc The interface description that sets the compatibility requirements.
//!
//! @param interfaceCandidate The interface that was provided by the user.
//!
//! @return if the interface candidate matches @p desc, returns @p interfaceCandidate. Otherwise, returns `nullptr`.
void*(CARB_ABI* verifyInterfaceEx)(InterfaceDesc desc, void* interfaceCandidate);
//! The descriptor for registering builtin @ref carb::assert::IAssert interface implementation.
const PluginRegistrationDesc&(CARB_ABI* getBuiltinAssertDesc)();
//! The descriptor for registering builtin @ref carb::thread::IThreadUtil interface implementation.
const PluginRegistrationDesc&(CARB_ABI* getBuiltinThreadUtilDesc)();
//! Load and register a plugin from the given filename.
//!
//! Call @ref unloadPlugin() to unload the plugin at @p libraryPath.
//!
//! @param libraryPath Name of the shared library. Must not be `nullptr`.
//!
//! @param reloadable Treat the plugin as reloadable.
//!
//! @param unload Grab the list of interfaces from the plugin and then unload it. If the user tries to acquire one
//! of the retrieved interfaces, the plugin will be lazily reloaded.
//!
//! @return Returns a non-negative value on success and a negative value otherwise.
LoadPluginResult(CARB_ABI* loadPlugin)(const char* libraryPath, bool reloadable, bool unload);
//! Unloads the plugin at the given shared library path.
//!
//! @param libraryPath Path to the shared library. Must not be `nullptr`.
//!
//! @returns Returns `true` if a plugin was loaded at the given path and successfully unloaded. `false` otherwise.
bool(CARB_ABI* unloadPlugin)(const char* libraryPath);
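// Example pairing of loadPlugin()/unloadPlugin() (an illustrative sketch; the library path is
// hypothetical):
//
//     const char* path = "plugins/carb.my.plugin.dll";
//     auto result = carb::getFramework()->loadPlugin(path, /*reloadable*/ false, /*unload*/ false);
//     // Per the documentation above, a non-negative result indicates success.
//     // ... acquire/release interfaces as needed, then:
//     carb::getFramework()->unloadPlugin(path);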
//! Adds a release hook for either the framework or a specific interface.
//!
//! A release hook can be added multiple times with the same or different user data, in which case it will be called
//! multiple times. It is up to the caller to ensure uniqueness if uniqueness is desired. To remove a release hook,
//! call @ref carb::Framework::removeReleaseHook() with the same parameters.
//!
//! @param iface The interface (returned by @ref carb::Framework::acquireInterface()) to monitor for release. If
//! `nullptr` is specified, the release hook will be called when the @ref carb::Framework itself is unloaded.
//!
//! @param fn The release hook callback function that will be called. Must not be `nullptr`.
//!
//! @param user Data to be passed to the release hook function. May be `nullptr`.
//!
//! @returns Returns `true` if the interface was found and the release hook was added successfully; `false`
//! otherwise.
//!
//! @rst
//!
//! .. danger::
//!
//! It is *expressly forbidden* to call back into :cpp:type:`carb::Framework` in any way during the
//! :cpp:type:`carb::ReleaseHookFn` callback. Doing so results in undefined behavior. The only exception to this
//! rule is calling `removeReleaseHook()`.
//!
//! @endrst
bool(CARB_ABI* addReleaseHook)(void* iface, ReleaseHookFn fn, void* user);
//! Removes a release hook previously registered with @ref carb::Framework::addReleaseHook().
//!
//! The same parameters supplied to @ref carb::Framework::addReleaseHook() must be provided in order to identify the
//! correct release hook to remove. It is safe to call this function from within the release hook callback.
//!
//! @param iface The interface previously passed to @ref addReleaseHook().
//!
//! @param fn The function previously passed to @ref addReleaseHook().
//!
//! @param user The user data parameter previously passed to @ref addReleaseHook().
//!
//! @returns Returns `true` if the release hook was found and removed. If it was not found, `false` is returned.
//!
//! @rst
//!
//! .. danger::
//!
//! It is *expressly forbidden* to call back into :cpp:type:`carb::Framework` in any way during the
//! :cpp:type:`carb::ReleaseHookFn` callback. Doing so results in undefined behavior. The only exception to this
//! rule is calling `removeReleaseHook()`.
//!
//! @endrst
bool(CARB_ABI* removeReleaseHook)(void* iface, ReleaseHookFn fn, void* user);
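// Example (an illustrative sketch; `iface` is an interface pointer previously acquired from the
// framework, and the callback signature is assumed to follow \ref ReleaseHookFn):
//
//     static void onReleased(void* iface, void* user)
//     {
//         // Must not call back into the Framework here, except removeReleaseHook().
//     }
//
//     carb::getFramework()->addReleaseHook(iface, onReleased, nullptr);
//     // ... later:
//     carb::getFramework()->removeReleaseHook(iface, onReleased, nullptr);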
//! @private
CARB_DEPRECATED("Use carbReallocate() instead")
void*(CARB_ABI* internalRealloc)(void* prev, size_t newSize, size_t align);
static_assert(kFrameworkVersion.major == 0, "Remove Framework::internalRealloc in next Framework version");
//! Allocates a block of memory.
//!
//! @note Any plugin (or the executable) may allocate the memory and a different plugin (or the executable) may free
//! or reallocate it.
//!
//! @warning It is undefined behavior to use memory allocated with this function or @ref reallocate() after the
//! Carbonite framework has been shut down.
//!
//! @param size The size of the memory block requested, in bytes. Specifying '0' will return a valid pointer that
//! can be passed to @ref free but cannot be used to store any information.
//! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less
//! than `sizeof(size_t)` are ignored. `0` indicates to use default system alignment (typically
//! `2 * sizeof(void*)`).
//! @returns A non-`nullptr` memory block of @p size bytes with minimum alignment @p align. If an error occurred,
//! or memory could not be allocated, `nullptr` is returned. The memory is not initialized.
CARB_DEPRECATED("Use carb::allocate() instead") void* allocate(size_t size, size_t align = 0)
{
return carb::allocate(size, align);
}
static_assert(kFrameworkVersion.major == 0, "Remove Framework::allocate in next Framework version");
//! Frees a block of memory previously allocated with @ref allocate().
//!
//! @note Any plugin (or the executable) may allocate the memory and a different plugin (or the executable) may
//! free it.
//!
//! @param p The block of memory previously returned from @ref allocate() or @ref reallocate(), or `nullptr`.
CARB_DEPRECATED("Use carb::deallocate() instead") void free(void* p)
{
return carb::deallocate(p);
}
static_assert(kFrameworkVersion.major == 0,
"Remove Framework::free and CARB_FREE_UNDEFINED in next Framework version");
//! Reallocates a block of memory previously allocated with @ref allocate().
//!
//! This function changes the size of the memory block pointed to by @p p to @p size bytes with @p align alignment.
//! The contents are unchanged from the start of the memory block up to the minimum of the old size and @p size. If
//! @p size is larger than the old size, the added memory is not initialized. If @p p is `nullptr`, the call is
//! equivalent to `allocate(size, align)`; if @p size is `0` and @p p is not `nullptr`, the call is equivalent to
//! `free(p)`. Unless @p p is `nullptr`, it must have been retrieved by an earlier call to @ref allocate() or
//! @ref reallocate(). If the memory region was moved in order to resize it, @p p will be freed as with `free(p)`.
//!
//! @note Any plugin (or the executable) may allocate the memory and a different plugin (or the executable) may
//! reallocate it.
//!
//! @warning It is undefined behavior to use memory allocated with this function or @ref allocate() after the
//! Carbonite framework has been shut down.
//!
//! @param p The block of memory previously returned from @ref allocate() or @ref reallocate() if resizing is
//!     desired. If `nullptr` is passed as this parameter, the call behaves as if
//! `allocate(size, align)` was called.
//! @param size The size of the memory block requested, in bytes. See above for further explanation.
//! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less
//! than `sizeof(size_t)` are ignored. Changing the alignment from a previous allocation is undefined behavior.
//! `0` indicates to use default system alignment (typically `2 * sizeof(void*)`).
//! @returns A pointer to a block of memory of @p size bytes with minimum alignment @p align, unless an error
//! occurs in which case `nullptr` is returned. If @p p is `nullptr` and @p size is `0` then `nullptr` is also
//! returned.
CARB_DEPRECATED("Use carb::reallocate() instead") void* reallocate(void* p, size_t size, size_t align = 0)
{
return carb::reallocate(p, size, align);
}
static_assert(kFrameworkVersion.major == 0, "Remove Framework::reallocate in next Framework version");
//! Retrieves the Carbonite SDK version string.
//!
//! @returns A string describing the current Carbonite SDK version. This will be the same value
//! as the @ref CARB_SDK_VERSION value that was set when the SDK was built.
//!
//! @note This version is intended for use in plugins. Since Carbonite plugins aren't directly
//! linked to the `carb` library, access to carbGetSdkVersion() isn't as easy as calling
//!         a library function. This member simply provides access to the same result from a
//!         location that is reliably accessible to plugins.
const char*(CARB_ABI* getSdkVersion)();
//! Adds a load hook that is called when an interface becomes available.
//!
//! No attempt is made to load the plugin. This can be used as a notification mechanism when a plugin cannot be
//! loaded immediately (due to circular dependencies for instance) but may be loaded later. To remove the load hook,
//! use \ref removeLoadHook(). It is possible to register multiple load hooks with the same parameters, but this is
//! not recommended and will cause the function to be called multiple times with the same parameters.
//!
//! See \ref LoadHookFn for a discussion on how and when load hooks are called.
//!
//! @see LoadHookFn removeLoadHook()
//! @tparam T The interface type
//! @param pluginName the name of the specific plugin desired that exposes \c T, or \c nullptr for any plugin.
//! @param func the \ref LoadHookFn to call when the given interface becomes available. This function may be called
//! multiple times if multiple plugins that expose interface \c T are loaded.
//! @param userData application-specific data that is supplied to \p func when it is called.
//! @returns A \ref LoadHookHandle uniquely identifying this load hook; \ref kInvalidLoadHook if an error occurs.
//! When finished with the load hook, call \ref removeLoadHook().
template <class T>
LoadHookHandle addLoadHook(const char* pluginName, LoadHookFn func, void* userData);
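// Example (an illustrative sketch; `MyInterface` is hypothetical and the callback signature is
// assumed to follow \ref LoadHookFn):
//
//     static void onAvailable(const carb::PluginDesc& plugin, void* user) { /* interface usable now */ }
//
//     carb::LoadHookHandle h = carb::getFramework()->addLoadHook<MyInterface>(nullptr, onAvailable, nullptr);
//     // ... later, when no longer interested:
//     carb::getFramework()->removeLoadHook(h);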
//! @private
LoadHookHandle(CARB_ABI* internalAddLoadHook)(
const InterfaceDesc& iface, const char* plugin, const char* clientName, LoadHookFn fn, void* user, bool add);
//! Removes a previously-registered load hook.
//!
//! It is safe to remove the load hook from within the load hook callback.
//!
//! @param handle The \ref LoadHookHandle returned from \ref addLoadHook().
//! @returns Returns \c true if the load hook was found and removed. If it was not found, \c false is returned.
bool(CARB_ABI* removeLoadHook)(LoadHookHandle handle);
//! Registers a client as a script binding or script language owner. Typically handled by CARB_BINDINGS().
//!
//! This function is used to notify the Carbonite framework of dependencies from a script language. This allows
//! proper dependency tracking and shutdown ordering. For instance, if a python binding loads an interface from
//! *carb.assets.plugin*, it appears to Carbonite that a non-plugin client requested the interface. However, if
//! python was started from *carb.scripting-python.plugin*, then it becomes necessary to establish a dependency
//! relationship between *carb.scripting-python.plugin* and any plugins loaded from python bindings. This function
//! has two purposes in this example: the *carb.scripting-python.plugin* will register itself as
//! \ref BindingType::Owner for @p scriptType `python`. All bindings automatically register themselves as
//! \ref BindingType::Binding for @p scriptType `python` through `CARB_BINDINGS()`. Whenever the binding acquires an
//! interface, all registered \ref BindingType::Owner clients gain a dependency on the acquired interface.
//!
//! @param type The \ref BindingType of \p clientName.
//! @param clientName A plugin or binding's client name (`g_carbClientName` typically created by `CARB_GLOBALS()` or
//! `CARB_BINDINGS()`).
//! @param scriptType A user-defined script type, such as "python" or "lua". Must match between owner and bindings.
//! Not case-sensitive.
void(CARB_ABI* registerScriptBinding)(BindingType type, const char* clientName, const char* scriptType);
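// Example (an illustrative sketch following the python scenario described above): a scripting
// plugin registers itself as the owner of the "python" script type.
//
//     carb::getFramework()->registerScriptBinding(carb::BindingType::Owner, g_carbClientName, "python");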
//! The main framework access function for acquiring an interface.
//!
//! @note This function is generally not intended to be used directly; instead, consider one of the many type-safe
//! adapter functions such as \ref tryAcquireInterface().
//!
//! @warning This function will be `nullptr` in Carbonite releases prior to 135.0
//!
//! @param options The structure containing the options for acquiring the interface.
//! @returns The interface pointer for the interface being acquired. May be `nullptr` if the interface could not be
//! acquired. Verbose logging will explain the entire acquisition process. Warning and Error logs may be
//! produced depending on options.
void*(CARB_ABI* internalAcquireInterface)(const AcquireInterfaceOptions& options);
};
} // namespace carb
//! The client's name.
//!
//! A "client" can be one of the following in the Carbonite framework:
//!
//! - A plugin. Here the client name is the same as the plugin name.
//!
//! - An application.
//!
//! - Scripting bindings.
//!
//! Clients are uniquely identified by their name. Many functions accept a client name as an argument. This allows @ref
//! carb::Framework to create a dependency tree of clients. This dependency tree allows the safe unloading of
//! plugins.
CARB_WEAKLINK CARB_HIDDEN const char* g_carbClientName;
//! Defines the client's global @ref carb::Framework pointer.
//!
//! Do not directly access this pointer. Rather use helper methods like @ref carb::getFramework() and @ref
//! carb::isFrameworkValid().
CARB_WEAKLINK CARB_HIDDEN carb::Framework* g_carbFramework;
//! Global symbol to enforce the use of CARB_GLOBALS() in Carbonite modules. Do not modify or use
//! this value.
//!
//! If there is an unresolved symbol linker error about this symbol (build time or run time), it
//! means that the CARB_GLOBALS() macro was not called at the global scope in the module. This
//! exists to ensure that all the global symbols related to each Carbonite module have been
//! properly defined and initialized.
extern bool g_needToCall_CARB_GLOBALS_atGlobalScope;
//! Defines global variables for use by Carbonite. Call this macro from the global namespace.
//!
//! Do not call this macro directly. Rather:
//!
//! - For applications, call @ref OMNI_APP_GLOBALS.
//!
//! - For Carbonite plugins, call @ref CARB_PLUGIN_IMPL.
//!
//! - For ONI plugins, call @ref OMNI_MODULE_GLOBALS.
#define CARB_FRAMEWORK_GLOBALS(clientName) \
CARB_HIDDEN bool g_needToCall_CARB_GLOBALS_atGlobalScope = carb::detail::setClientName(clientName);
namespace carb
{
namespace detail
{
//! Sets the client name for the calling module.
//!
//! @param[in] clientName A string literal containing the name of the calling plugin or
//! executable. This string must be guaranteed constant for the
//! lifetime of the module.
//! @returns `true`.
//!
//! @note This should not be called directly. This is called as part of CARB_FRAMEWORK_GLOBALS().
inline bool setClientName(const char* clientName)
{
g_carbClientName = clientName;
return true;
}
} // namespace detail
//! Gets the Carbonite framework.
//!
//! The @ref carb::Framework can be `nullptr` for applications if it hasn't acquired it (see @ref
//! carb::acquireFramework()). It can also be `nullptr` for a plugin if the plugin is used externally and was not loaded
//! by framework itself.
//!
//! After starting up, @ref carb::getFramework() can be considered a getter for a global singleton that is the @ref
//! carb::Framework.
//!
//! @return The Carbonite framework.
inline Framework* getFramework()
{
return g_carbFramework;
}
inline void Framework::loadPlugins(const PluginLoadingDesc& desc)
{
return this->loadPluginsEx(desc);
}
template <typename T>
T* Framework::verifyInterface(T* interfaceCandidate)
{
const auto desc = T::getInterfaceDesc();
return static_cast<T*>(getFramework()->verifyInterfaceEx(desc, interfaceCandidate));
}
template <typename T>
T* Framework::acquireInterface(const char* pluginName)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
if (this->internalAcquireInterface)
return static_cast<T*>(this->internalAcquireInterface(
{ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(), eAIFDefaultType, pluginName }));
else
return static_cast<T*>(this->acquireInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginName));
}
template <typename T>
T* Framework::tryAcquireInterface(const char* pluginName)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
if (this->internalAcquireInterface)
return static_cast<T*>(
this->internalAcquireInterface({ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
AcquireInterfaceFlags(eAIFDefaultType | fAIFOptional), pluginName }));
else
return static_cast<T*>(this->tryAcquireInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginName));
}
template <typename T>
T* Framework::acquireInterface(const void* pluginInterface)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
if (this->internalAcquireInterface)
return static_cast<T*>(
this->internalAcquireInterface({ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
eAIFFromInterfaceType, pluginInterface }));
else
return static_cast<T*>(
this->acquireInterfaceFromInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginInterface));
}
template <typename T>
T* Framework::tryAcquireInterface(const void* pluginInterface)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
if (this->internalAcquireInterface)
return static_cast<T*>(this->internalAcquireInterface(
{ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
AcquireInterfaceFlags(eAIFFromInterfaceType | fAIFOptional), pluginInterface }));
else
return static_cast<T*>(
this->tryAcquireInterfaceFromInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginInterface));
}
template <typename T>
T* Framework::acquireInterfaceFromLibrary(const char* libraryPath)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
if (this->internalAcquireInterface)
return static_cast<T*>(this->internalAcquireInterface(
{ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(), eAIFFromLibraryType, libraryPath }));
else
return static_cast<T*>(
this->acquireInterfaceFromLibraryWithClient(clientName, T::getInterfaceDesc(), libraryPath));
}
template <typename T>
T* Framework::tryAcquireInterfaceFromLibrary(const char* libraryPath)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
if (this->internalAcquireInterface)
return static_cast<T*>(
this->internalAcquireInterface({ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
AcquireInterfaceFlags(eAIFFromLibraryType | fAIFOptional), libraryPath }));
else
return static_cast<T*>(
this->tryAcquireInterfaceFromLibraryWithClient(clientName, T::getInterfaceDesc(), libraryPath));
}
template <typename T>
T* Framework::tryAcquireExistingInterface(const char* pluginName)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
return this->internalAcquireInterface ?
static_cast<T*>(this->internalAcquireInterface(
{ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
AcquireInterfaceFlags(eAIFDefaultType | fAIFOptional | fAIFNoInitialize), pluginName })) :
nullptr;
}
template <typename T>
uint32_t Framework::getInterfacesCount()
{
const InterfaceDesc desc = T::getInterfaceDesc();
return this->getInterfacesCountEx(desc);
}
template <typename T>
void Framework::acquireInterfaces(T** interfaces, uint32_t interfacesSize)
{
const InterfaceDesc desc = T::getInterfaceDesc();
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
this->acquireInterfacesWithClient(clientName, desc, reinterpret_cast<void**>(interfaces), interfacesSize);
}
template <typename T>
void Framework::releaseInterface(T* pluginInterface)
{
(void)(T::getInterfaceDesc()); // Compile-time check that the type is plugin interface
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
this->releaseInterfaceWithClient(clientName, pluginInterface);
}
template <typename T>
void Framework::setDefaultPlugin(const char* pluginName)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
this->setDefaultPluginEx(clientName, T::getInterfaceDesc(), pluginName);
}
template <typename T>
LoadHookHandle Framework::addLoadHook(const char* pluginName, LoadHookFn func, void* user)
{
const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
return this->internalAddLoadHook(T::getInterfaceDesc(), pluginName, clientName, func, user, true);
}
} // namespace carb
#ifdef CARB_FREE_UNDEFINED
# pragma pop_macro("free")
# undef CARB_FREE_UNDEFINED
#endif
| 71,177 |
C
| 48.670621 | 120 | 0.694115 |
omniverse-code/kit/include/carb/BindingsPythonUtils.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindingsUtils.h"
#include "IObject.h"
#include "cpp/TypeTraits.h"
#include "cpp/Functional.h"
// Python uses these in modsupport.h, so undefine them now
#pragma push_macro("min")
#undef min
#pragma push_macro("max")
#undef max
CARB_IGNOREWARNING_MSC_WITH_PUSH(4668) // 'X' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
#include <pybind11/chrono.h>
#include <pybind11/functional.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
CARB_IGNOREWARNING_MSC_POP
#pragma pop_macro("min")
#pragma pop_macro("max")
namespace py = pybind11;
PYBIND11_DECLARE_HOLDER_TYPE(T, carb::ObjectPtr<T>, true);
// Provide simple implementations of types used in multiple bindings.
namespace carb
{
template <typename InterfaceType, typename ReturnType, typename... Args>
auto wrapInterfaceFunctionReleaseGIL(ReturnType (*InterfaceType::*p)(Args...))
-> std::function<ReturnType(InterfaceType&, Args...)>
{
return [p](InterfaceType& c, Args... args) {
py::gil_scoped_release g;
return (c.*p)(args...);
};
}
template <typename InterfaceType, typename ReturnType, typename... Args>
auto wrapInterfaceFunctionReleaseGIL(const InterfaceType* c, ReturnType (*InterfaceType::*p)(Args...))
-> std::function<ReturnType(Args...)>
{
return [c, p](Args... args) {
py::gil_scoped_release g;
return (c->*p)(args...);
};
}
template <typename InterfaceType, typename... PyClassArgs>
py::class_<InterfaceType, PyClassArgs...> defineInterfaceClass(py::module& m,
const char* className,
const char* acquireFuncName,
const char* releaseFuncName = nullptr,
const char* classDocstring = nullptr)
{
auto cls = classDocstring ? py::class_<InterfaceType, PyClassArgs...>(m, className, classDocstring) :
py::class_<InterfaceType, PyClassArgs...>(m, className);
m.def(acquireFuncName,
[](const char* pluginName, const char* libraryPath) {
return libraryPath ? acquireInterfaceFromLibraryForBindings<InterfaceType>(libraryPath) :
acquireInterfaceForBindings<InterfaceType>(pluginName);
},
py::arg("plugin_name") = nullptr, py::arg("library_path") = nullptr, py::return_value_policy::reference);
if (releaseFuncName)
{
m.def(releaseFuncName, [](InterfaceType* iface) { carb::getFramework()->releaseInterface(iface); });
}
return cls;
}
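// Example usage inside a bindings module (an illustrative sketch; `IMyInterface` and all names are
// hypothetical, and `doThing` is assumed to be a function-pointer member of the interface):
//
//     PYBIND11_MODULE(_my_bindings, m)
//     {
//         auto cls = carb::defineInterfaceClass<IMyInterface>(
//             m, "IMyInterface", "acquire_my_interface", "release_my_interface");
//         cls.def("do_thing", carb::wrapInterfaceFunctionReleaseGIL(&IMyInterface::doThing));
//     }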
/**
 * Assuming the given std::function calls into Python code, this function makes the call safe.
 * It wraps the call in try/catch, acquires the GIL, and logs errors.
*/
template <typename Sig, typename... ArgsT>
auto callPythonCodeSafe(const std::function<Sig>& fn, ArgsT&&... args)
{
using ReturnT = cpp::invoke_result_t<decltype(fn), ArgsT...>;
try
{
if (fn)
{
py::gil_scoped_acquire gilLock;
return fn(std::forward<ArgsT>(args)...);
}
}
catch (const py::error_already_set& e)
{
CARB_LOG_ERROR("%s", e.what());
}
catch (const std::runtime_error& e)
{
CARB_LOG_ERROR("%s", e.what());
}
return ReturnT();
}
/**
 * Helper class to implement scripting callbacks.
 * It extends ScriptCallbackRegistry to provide a facility for safely calling Python callbacks, adding GIL locking and
 * error handling. ScriptCallbackRegistryPython::call can be passed into a C API as a C function, as long as FuncT* is
 * passed as userData.
*/
template <class KeyT, typename ReturnT, typename... Args>
class ScriptCallbackRegistryPython : public ScriptCallbackRegistry<KeyT, ReturnT, Args...>
{
public:
using typename ScriptCallbackRegistry<KeyT, ReturnT, Args...>::FuncT;
static ReturnT call(Args... args, void* userData)
{
return callTyped((FuncT*)userData, std::forward<Args>(args)...);
}
static ReturnT callTyped(FuncT* f, Args&&... args)
{
return callPythonCodeSafe(*f, std::forward<Args>(args)...);
}
};
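// Example (an illustrative sketch; `iface` and its setCallback() are hypothetical, and the stored
// callback must outlive the registration):
//
//     using Registry = carb::ScriptCallbackRegistryPython<std::string, void, int>;
//     static Registry::FuncT s_fn = [](int value) { /* wrapped Python callable */ };
//     iface->setCallback(&Registry::call, &s_fn); // s_fn is passed back to Registry::call as userData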
/**
 * Holds a subscription for Python in an RAII manner. The unsubscribe function is called upon destruction.
*/
class Subscription
{
public:
template <class Unsubscribe>
explicit Subscription(Unsubscribe&& unsubscribe) : m_unsubscribeFn(std::forward<Unsubscribe>(unsubscribe))
{
}
void unsubscribe()
{
if (m_unsubscribeFn)
{
m_unsubscribeFn();
m_unsubscribeFn = nullptr;
}
}
~Subscription()
{
unsubscribe();
}
private:
std::function<void()> m_unsubscribeFn;
};
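// Example (an illustrative sketch; `iface`/`id` are hypothetical): wrap an existing subscription so
// that it is automatically released when the Python object is garbage-collected.
//
//     auto sub = std::make_shared<carb::Subscription>([iface, id] { iface->unsubscribe(id); });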
template <class Ret, class... Args>
class PyAdapter
{
using Function = std::function<Ret(Args...)>;
Function m_func;
struct ScopedDestroy
{
PyAdapter* m_callable;
ScopedDestroy(PyAdapter* callable) : m_callable(callable)
{
}
~ScopedDestroy()
{
delete m_callable;
}
};
public:
PyAdapter(Function&& func) : m_func(std::move(func))
{
}
template <class... Args2>
auto call(Args2&&... args)
{
using ReturnType = cpp::invoke_result_t<Function, Args2...>;
try
{
py::gil_scoped_acquire gil;
if (m_func)
{
return cpp::invoke(std::move(m_func), std::forward<Args2>(args)...);
}
}
catch (const py::error_already_set& e)
{
CARB_LOG_ERROR("%s", e.what());
}
catch (const std::runtime_error& e)
{
CARB_LOG_ERROR("%s", e.what());
}
py::gil_scoped_acquire gil; // Hold the GIL while constructing whatever return type
return ReturnType();
}
// Direct adapter to Carbonite callback when userData is the last argument, the PyAdapter* is the userdata, and
// multiple calls to this adapter are desired. The adapter must be deleted with `delete` or `destroy()` later.
static auto adaptCallAndKeep(Args... args, void* user)
{
return static_cast<PyAdapter*>(user)->call(std::forward<Args>(args)...);
}
// Direct adapter to Carbonite callback when userData is the last argument, the PyAdapter* is the userdata, and
// there will be only one call to the adapter.
static auto adaptCallAndDestroy(Args... args, void* user)
{
PyAdapter* callable = static_cast<PyAdapter*>(user);
ScopedDestroy scopedDestroy(callable);
return callable->call(std::forward<Args>(args)...);
}
// Call the adapter with perfect forwarding and keep the adapter around for future calls.
template <class... Args2>
static auto callAndKeep(void* user, Args2&&... args)
{
return static_cast<PyAdapter*>(user)->call(std::forward<Args2>(args)...);
}
// Call the adapter with perfect forwarding and destroy the adapter.
template <class... Args2>
static auto callAndDestroy(void* user, Args2&&... args)
{
PyAdapter* callable = static_cast<PyAdapter*>(user);
ScopedDestroy scopedDestroy(callable);
return callable->call(std::forward<Args2>(args)...);
}
static void destroy(void* user)
{
delete static_cast<PyAdapter*>(user);
}
};
template <class Ret, class... Args>
std::unique_ptr<PyAdapter<Ret, Args...>> createPyAdapter(std::function<Ret(Args...)>&& func)
{
return std::make_unique<PyAdapter<Ret, Args...>>(std::move(func));
}
template <class Callback, class Subscribe, class Unsubscribe>
std::shared_ptr<Subscription> createPySubscription(Callback&& func, Subscribe&& subscribe, Unsubscribe&& unsub)
{
auto callable = createPyAdapter(std::forward<Callback>(func));
using Callable = typename decltype(callable)::element_type;
auto&& id = subscribe(Callable::adaptCallAndKeep, callable.get());
return std::make_shared<Subscription>(
[unsub = std::forward<Unsubscribe>(unsub), id = std::move(id), callable = callable.release()] {
unsub(id);
delete callable;
});
}
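// Example (an illustrative sketch; `iface` and its subscribeToEvents()/unsubscribeFromEvents() are
// hypothetical C-style subscription functions):
//
//     m.def("subscribe_to_events", [iface](std::function<void(int)> fn) {
//         return carb::createPySubscription(
//             std::move(fn),
//             [iface](auto cb, void* userData) { return iface->subscribeToEvents(cb, userData); },
//             [iface](auto id) { iface->unsubscribeFromEvents(id); });
//     });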
/**
 * Set of helpers to pass a std::function (from Python bindings) into Carbonite interfaces.
 * Deprecated: use PyAdapter instead, via createPyAdapter()/createPySubscription().
*/
template <typename ReturnT, typename... ArgsT>
class FuncUtils
{
public:
using StdFuncT = std::function<ReturnT(ArgsT...)>;
using CallbackT = ReturnT (*)(ArgsT..., void*);
static ReturnT callPythonCodeSafe(const std::function<ReturnT(ArgsT...)>& fn, ArgsT... args)
{
return carb::callPythonCodeSafe(fn, args...);
}
static ReturnT callbackWithUserData(ArgsT... args, void* userData)
{
StdFuncT* fn = (StdFuncT*)userData;
if (fn)
return callPythonCodeSafe(*fn, args...);
else
return ReturnT();
}
static StdFuncT* createStdFuncCopy(const StdFuncT& fn)
{
return new StdFuncT(fn);
}
static void destroyStdFuncCopy(StdFuncT* fn)
{
delete fn;
}
/**
     * If you have a std::function which calls into Python code and an interface with a pair of subscribe/unsubscribe
     * functions, this function:
     * 1. Prolongs the lifetime of the std::function (and thus the Python callable) by making a copy of it on the heap.
     * 2. Subscribes to the interface's C-style subscribe function by passing this std::function as void* userData (and
     * calling it back safely).
     * 3. Wraps the subscription id into a Subscription class returned to Python, which holds the subscription and
     * automatically unsubscribes when destroyed.
*/
template <class SubscriptionT>
static std::shared_ptr<Subscription> buildSubscription(const StdFuncT& fn,
SubscriptionT (*subscribeFn)(CallbackT, void*),
void (*unsubscribeFn)(SubscriptionT))
{
StdFuncT* funcCopy = new StdFuncT(fn);
auto id = subscribeFn(callbackWithUserData, funcCopy);
auto subscription = std::make_shared<Subscription>([=]() {
unsubscribeFn(id);
delete funcCopy;
});
return subscription;
}
};
template <class T>
struct StdFuncUtils;
template <class R, class... Args>
struct StdFuncUtils<std::function<R(Args...)>> : public FuncUtils<R, Args...>
{
};
template <class R, class... Args>
struct StdFuncUtils<const std::function<R(Args...)>> : public FuncUtils<R, Args...>
{
};
template <class R, class... Args>
struct StdFuncUtils<const std::function<R(Args...)>&> : public FuncUtils<R, Args...>
{
};
/**
 * Helper to wrap a function that returns `IObject*` into the same function that returns a stolen ObjectPtr<IObject> holder.
*/
template <typename ReturnT, typename... Args>
std::function<ReturnT(Args...)> wrapPythonCallback(std::function<ReturnT(Args...)>&& c)
{
return [c = std::move(c)](Args... args) -> ReturnT { return callPythonCodeSafe(c, std::forward<Args>(args)...); };
}
} // namespace carb
#ifdef DOXYGEN_BUILD
/**
* Macro that allows disabling pybind's use of RTTI to perform duck typing.
*
* Given a pointer, pybind uses RTTI to figure out the actual type of the pointer (e.g. given an `IObject*`, RTTI can be
 * used to figure out the pointer is really an `IWindow*`). Once pybind knows the "real" type, it generates a PyObject
 * that contains wrappers for all of the "real" type's methods.
*
* Unfortunately, RTTI is compiler dependent (not @rstref{ABI-safe <abi-compatibility>}) and we've disabled it in much
* of our code.
*
 * The `polymorphic_type_hook` specializations generated by this macro prevent pybind from using RTTI to find the
 * "real" type of a pointer. This means that when using our bindings in Python, you have to "cast" objects to access a
* given interface. For example:
* ```python
* obj = func_that_returns_iobject()
* win = IWindow(obj) # a cast. None is returned if the cast fails.
* if win:
 *     win.title = "hi"
* ```
*
* As an aside, since implementations can implement multiple interfaces and the actual implementations are hidden to
* pybind (we create bindings for interfaces not implementations), the pybind "duck" typing approach was never going to
* work for us. Said differently, some sort of "cast to this interface" was inevitable.
* @param TYPE The type to disable Pythonic dynamic casting for.
*/
# define DISABLE_PYBIND11_DYNAMIC_CAST(TYPE)
#else
# define DISABLE_PYBIND11_DYNAMIC_CAST(TYPE) \
namespace pybind11 \
{ \
template <> \
struct polymorphic_type_hook<TYPE> \
{ \
static const void* get(const TYPE* src, const std::type_info*&) \
{ \
return src; \
} \
}; \
template <typename itype> \
struct polymorphic_type_hook< \
itype, \
detail::enable_if_t<std::is_base_of<TYPE, itype>::value && !std::is_same<TYPE, itype>::value>> \
{ \
static const void* get(const TYPE* src, const std::type_info*&) \
{ \
return src; \
} \
}; \
}
#endif
DISABLE_PYBIND11_DYNAMIC_CAST(carb::IObject)
| 15,665 |
C
| 36.568345 | 120 | 0.557229 |
omniverse-code/kit/include/carb/RenderingTypes.h
|
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "Types.h"
namespace carb
{
/**
* Defines a resource format.
*/
enum class Format
{
eUnknown,
eR8_UNORM,
eR8_SNORM,
eR8_UINT,
eR8_SINT,
eRG8_UNORM,
eRG8_SNORM,
eRG8_UINT,
eRG8_SINT,
eBGRA8_UNORM,
eBGRA8_SRGB,
eRGBA8_UNORM,
eRGBA8_SNORM,
eRGBA8_UINT,
eRGBA8_SINT,
eRGBA8_SRGB,
eR16_UNORM,
eR16_SNORM,
eR16_UINT,
eR16_SINT,
eR16_SFLOAT,
eRG16_UNORM,
eRG16_SNORM,
eRG16_UINT,
eRG16_SINT,
eRG16_SFLOAT,
eRGBA16_UNORM,
eRGBA16_SNORM,
eRGBA16_UINT,
eRGBA16_SINT,
eRGBA16_SFLOAT,
eR32_UINT,
eR32_SINT,
eR32_SFLOAT,
eRG32_UINT,
eRG32_SINT,
eRG32_SFLOAT,
eRGB32_UINT,
eRGB32_SINT,
eRGB32_SFLOAT,
eRGBA32_UINT,
eRGBA32_SINT,
eRGBA32_SFLOAT,
eR10_G10_B10_A2_UNORM,
eR10_G10_B10_A2_UINT,
eR11_G11_B10_UFLOAT,
eR9_G9_B9_E5_UFLOAT,
eB5_G6_R5_UNORM,
eB5_G5_R5_A1_UNORM,
eBC1_RGBA_UNORM,
eBC1_RGBA_SRGB,
eBC2_RGBA_UNORM,
eBC2_RGBA_SRGB,
eBC3_RGBA_UNORM,
eBC3_RGBA_SRGB,
eBC4_R_UNORM,
eBC4_R_SNORM,
eBC5_RG_UNORM,
eBC5_RG_SNORM,
eBC6H_RGB_UFLOAT,
eBC6H_RGB_SFLOAT,
eBC7_RGBA_UNORM,
eBC7_RGBA_SRGB,
eD16_UNORM,
eD24_UNORM_S8_UINT,
eD32_SFLOAT,
eD32_SFLOAT_S8_UINT_X24,
// Formats for depth-stencil views
eR24_UNORM_X8,
eX24_R8_UINT,
eX32_R8_UINT_X24,
eR32_SFLOAT_X8_X24,
// Formats for sampler-feedback
eSAMPLER_FEEDBACK_MIN_MIP,
eSAMPLER_FEEDBACK_MIP_REGION_USED,
// Little-Endian Formats
eABGR8_UNORM,
eABGR8_SRGB,
// Must be last
eCount
};
/**
* Defines a sampling count for a resource.
*/
enum class SampleCount
{
e1x,
e2x,
e4x,
e8x,
e16x,
e32x,
e64x
};
/**
* Defines the presentation mode for the rendering system.
*/
enum class PresentMode : uint8_t
{
eNoTearing, //!< No tearing.
eAllowTearing //!< Allow tearing.
};
/**
* Defines a descriptor for clearing color values.
*/
union ClearColorValueDesc
{
Color<float> rgba32f;
Color<uint32_t> rgba32ui;
Color<int32_t> rgba32i;
};
/**
* Defines a descriptor for clearing depth-stencil values.
*/
struct ClearDepthStencilValueDesc
{
float depth;
uint32_t stencil;
};
enum class TextureGamma
{
eDefault, ///< treat as linear for HDR formats, as sRGB for LDR formats (use e*_SRGB tex format or convert on load)
eLinear, ///< treat as linear, leaves data unchanged
eSRGB, ///< treat as sRGB, (use e*_SRGB texture format or convert on load)
eCount
};
} // namespace carb
| 3,073 |
C
| 18.832258 | 119 | 0.649203 |
omniverse-code/kit/include/carb/FrameworkUtils.h
|
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "Framework.h"
#include "extras/Path.h"
#include <string>
#include <unordered_set>
#include <vector>
namespace carb
{
/**
* Get all registered plugins and collect folders they are located in.
*/
inline std::unordered_set<std::string> getPluginFolders()
{
Framework* framework = carb::getFramework();
std::vector<PluginDesc> plugins(framework->getPluginCount());
framework->getPlugins(plugins.data());
std::unordered_set<std::string> folders;
for (const auto& desc : plugins)
{
extras::Path p(desc.libPath);
const std::string& folder = p.getParent();
if (!folder.empty())
{
folders.insert(folder);
}
}
return folders;
}
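// Example (an illustrative sketch):
//
//     for (const std::string& folder : carb::getPluginFolders())
//         CARB_LOG_INFO("plugins found in: %s", folder.c_str());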
} // namespace carb
| 1,188 |
C
| 27.999999 | 77 | 0.700337 |
omniverse-code/kit/include/carb/PluginUtils.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Utilities to ease the creation of Carbonite plugins.
#pragma once
#include "ClientUtils.h"
#include "PluginInitializers.h"
#include "PluginCoreUtils.h"
namespace omni
{
namespace structuredlog
{
void addModulesSchemas() noexcept;
}
} // namespace omni
//! Plugin helper macro to define boiler-plate code to register and unregister the plugin with various other components
//! in the system (e.g. logging channels, profiler, localization, etc.).
//!
//! Do not directly call this macro, rather call @ref CARB_PLUGIN_IMPL() which will call this macro for you.
#define CARB_DEFAULT_INITIALIZERS() \
CARB_EXPORT void carbOnPluginPreStartup() \
{ \
carb::pluginInitialize(); \
omni::structuredlog::addModulesSchemas(); \
} \
\
CARB_EXPORT void carbOnPluginPostShutdown() \
{ \
carb::pluginDeinitialize(); \
}
//! Main macro to declare a plugin implementation where multiple interface versions are not required.
//!
//! Authors of Carbonite plugins must use this macro in exactly one compilation unit for their plugin to generate code
//! expected by the Carbonite framework.
//!
//! @note Carbonite plugins can provide multiple versions of an interface to remain backwards compatible with apps and
//! modules that are built against earlier versions of plugins. In order to do this, see \ref CARB_PLUGIN_IMPL_EX.
//!
//! In particular, this macro:
//!
//! - Defines global variables, such as @ref g_carbFramework.
//!
//! - Registers a default logging channel with @ref omni::log::ILog.
//!
//! - Adds boiler-plate code for @oni_overview interop.
//!
//! - Adds boiler-plate code for plugin startup, shutdown, and registration. (See @carb_framework_overview for more
//! information).
//!
//! This macro must be used in the global namespace. A @ref carb::PluginImplDesc must be provided as well as all
//! interfaces exported by this plugin. Each interface must be declared with @ref CARB_PLUGIN_INTERFACE. There must also
//! exist a @ref fillInterface(InterfaceType&) function for each interface type that is exported by this plugin.
//! A trailing semicolon is optional.
//!
//! Example:
//! @code{.cpp}
//! // Plugin Implementation Descriptor
//! const carb::PluginImplDesc kPluginImpl{ "carb.windowing-glfw.plugin", "Windowing (glfw).", "NVIDIA",
//! carb::PluginHotReload::eDisabled, "dev" };
//!
//! // Generate boilerplate code
//! CARB_PLUGIN_IMPL(kPluginImpl, carb::windowing::IWindowing, carb::windowing::IGLContext)
//!
//! // Construct the carb::windowing::IWindowing interface
//! void fillInterface(carb::windowing::IWindowing& iface) { /* ... */ }
//!
//! // Construct the carb::windowing::IGLContext interface
//! void fillInterface(carb::windowing::IGLContext& iface) { /* ... */ }
//! @endcode
//!
//! See @carb_framework_overview and @carb_interfaces for more information on creating Carbonite plugins.
//!
//! @param impl The @ref carb::PluginImplDesc constant to be used as plugin description.
//!
//! @param ... One or more interface types to be implemented by the plugin. An interface is a `struct` or `class` with
//! a use of @ref CARB_PLUGIN_INTERFACE() inside it. These interface types are constructed by a global function
//! @ref fillInterface(InterfaceType&) that must exist in the plugin. See @ref fillInterface(InterfaceType&) for more
//! information about interface construction and destruction.
#define CARB_PLUGIN_IMPL(impl, ...) \
CARB_GLOBALS_EX(impl.name, impl.description) \
OMNI_MODULE_GLOBALS_FOR_PLUGIN() \
CARB_PLUGIN_IMPL_WITH_INIT_0_5(impl, __VA_ARGS__) /* for backwards compatibility */ \
CARB_PLUGIN_IMPL_WITH_INIT(impl, __VA_ARGS__) \
CARB_DEFAULT_INITIALIZERS()
//! Main macro to declare a plugin implementation where multiple interface versions are required.
//!
//! Authors of Carbonite plugins must use this macro in exactly one compilation unit for their plugin to generate code
//! expected by the Carbonite framework.
//!
//! @note This implementation macro allows Carbonite plugins to provide multiple versions of an interface in order to
//! remain backwards compatible with apps and modules that are built against earlier versions of plugins. Every
//! interface exported by the plugin must have a @ref fillInterface(carb::Version*, void*) function.
//!
//! In particular, this macro:
//!
//! - Defines global variables, such as @ref g_carbFramework.
//!
//! - Registers a default logging channel with @ref omni::log::ILog.
//!
//! - Adds boiler-plate code for @oni_overview interop.
//!
//! - Adds boiler-plate code for plugin startup, shutdown, and registration. (See @carb_framework_overview for more
//! information).
//!
//! This macro must be used in the global namespace. A @ref carb::PluginImplDesc must be provided as well as all
//! interfaces exported by this plugin. Each interface must be declared with @ref CARB_PLUGIN_INTERFACE. There must also
//! exist a @ref fillInterface(carb::Version*, void*) function for each interface type that is exported by this plugin.
//! A trailing semicolon is optional.
//!
//! Example:
//! @code{.cpp}
//! // Plugin Implementation Descriptor
//! const carb::PluginImplDesc kPluginImpl{ "carb.windowing-glfw.plugin", "Windowing (glfw).", "NVIDIA",
//! carb::PluginHotReload::eDisabled, "dev" };
//!
//! // Generate boilerplate code
//! CARB_PLUGIN_IMPL_EX(kPluginImpl, carb::windowing::IWindowing, carb::windowing::IGLContext)
//!
//! // Construct the carb::windowing::IWindowing interface
//! template <> void fillInterface<carb::windowing::IWindowing>(carb::Version* v, void* iface) { /* ... */ }
//!
//! // Construct the carb::windowing::IGLContext interface
//! template <> void fillInterface<carb::windowing::IGLContext>(carb::Version* v, void* iface) { /* ... */ }
//! @endcode
//!
//! See @carb_framework_overview and @carb_interfaces for more information on creating Carbonite plugins.
//!
//! @param impl The @ref carb::PluginImplDesc constant to be used as plugin description.
//!
//! @param ... One or more interface types to be implemented by the plugin. An interface is a `struct` or `class` with
//! a use of @ref CARB_PLUGIN_INTERFACE() inside it. These interface types are constructed by a global explicitly-
//! specialized template function @ref fillInterface(carb::Version*, void*) that must exist in the plugin. See
//! @ref fillInterface(carb::Version*, void*) for more information about interface construction and destruction.
#define CARB_PLUGIN_IMPL_EX(impl, ...) \
CARB_GLOBALS_EX(impl.name, impl.description) \
OMNI_MODULE_GLOBALS_FOR_PLUGIN() \
CARB_PLUGIN_IMPL_WITH_INIT_EX(impl, __VA_ARGS__) \
CARB_PLUGIN_IMPL_WITH_INIT_0_5_EX(impl, __VA_ARGS__) /* for backwards compatibility */ \
CARB_DEFAULT_INITIALIZERS()
/**
 * Macro to declare a plugin implementation's dependencies.
*
 * If a plugin lists an interface "A" as a dependency, it is guaranteed that a `carb::Framework::acquireInterface<A>()`
 * call will return it; otherwise the call can return `nullptr`. The Framework checks and resolves all dependencies
 * before loading the plugin. If a dependency cannot be loaded (i.e. no plugin satisfies the interface, or a circular load is
* discovered) then the plugin will fail to load and `nullptr` will be returned from the
* carb::Framework::acquireInterface() function.
*
* @note Circular dependencies can exist as long as they are not stated in the CARB_PLUGIN_IMPL_DEPS() macros. For
* instance, assume plugins *Alpha*, *Beta*, and *Gamma*. *Alpha* is dependent on *Beta*; *Beta* is dependent on
* *Gamma*. *Gamma* is dependent on *Alpha*, but cannot list *Alpha* in its CARB_PLUGIN_IMPL_DEPS() macro, nor
* attempt to acquire and use it in *Gamma*'s carbOnPluginStartup() function. At a later point from within *Gamma*, the
* desired interface from *Alpha* may be acquired and used. However, in terms of unload order, *Alpha* will be unloaded
* first, followed by *Beta* and finally *Gamma*. In this case the *Gamma* carbOnPluginShutdown() function must account
* for the fact that *Alpha* will already be unloaded.
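 *
 * Example (a minimal sketch; the listed interfaces are illustrative):
 * @code{.cpp}
 * CARB_PLUGIN_IMPL_DEPS(carb::settings::ISettings, carb::logging::ILogging)
 * @endcode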
*
* @param ... One or more interface types (e.g. `carb::settings::ISettings`) to list as dependencies for this plugin.
*/
#define CARB_PLUGIN_IMPL_DEPS(...) \
template <typename... Types> \
static void getPluginDepsTyped(struct carb::InterfaceDesc** deps, size_t* count) \
{ \
static carb::InterfaceDesc depends[] = { Types::getInterfaceDesc()... }; \
*deps = depends; \
*count = sizeof(depends) / sizeof(depends[0]); \
} \
\
CARB_EXPORT void carbGetPluginDeps(struct carb::InterfaceDesc** deps, size_t* count) \
{ \
getPluginDepsTyped<__VA_ARGS__>(deps, count); \
}
/**
* Macro to declare a plugin without dependencies.
*
* Calling this macro is not required if there are no dependencies. This macro exists to make your plugin more
* readable.
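 *
 * Example:
 * @code{.cpp}
 * CARB_PLUGIN_IMPL_NO_DEPS()
 * @endcode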
*/
#define CARB_PLUGIN_IMPL_NO_DEPS() \
CARB_EXPORT void carbGetPluginDeps(struct carb::InterfaceDesc** deps, size_t* count) \
{ \
*deps = nullptr; \
*count = 0; \
}
/**
* Macro to declare a "minimal" plugin.
*
* Plugins in the Carbonite ecosystem tend to depend on other plugins. For example, plugins often want to access
* Carbonite's logging system via @ref carb::logging::ILogging. When calling @ref CARB_PLUGIN_IMPL, boiler-plate code
* is injected to ensure the plugin can use these "common" plugins.
*
 * This macro avoids taking dependencies on these "common" plugins. When calling this macro, only the "minimal"
 * boiler-plate code is generated in order for the plugin to work. It's up to the developer to add additional code to make the
* plugin compatible with any desired "common" plugin.
*
* Use of this macro is rare in Omniverse.
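 *
 * Example (a minimal sketch; the descriptor values and interface are illustrative). As written, the
 * macro body references `kPluginImpl` by name, so the descriptor should use that identifier:
 * @code{.cpp}
 * const carb::PluginImplDesc kPluginImpl{ "carb.example.plugin", "Example plugin.", "NVIDIA",
 *                                         carb::PluginHotReload::eDisabled, "dev" };
 * CARB_PLUGIN_IMPL_MINIMAL(kPluginImpl, carb::windowing::IWindowing)
 * @endcode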
*/
#define CARB_PLUGIN_IMPL_MINIMAL(impl, ...) \
CARB_FRAMEWORK_GLOBALS(kPluginImpl.name) \
CARB_PLUGIN_IMPL_WITH_INIT(impl, __VA_ARGS__)
| 13,568 |
C
| 61.819444 | 120 | 0.549455 |
omniverse-code/kit/include/carb/Version.h
|
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Utilities for Carbonite version.
#pragma once
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <type_traits>
// Note: Ideally this would be in Defines.h, but there is a weird circular dependency:
// Defines.h -> assert/IAssert.h -> Interface.h -> Version.h
//! A macro to ensure interop safety by assertion
//!
//! In order to have @rstref{interop safety <abi-compatibility>} a type must be
//! <a href="https://en.cppreference.com/w/cpp/named_req/TriviallyCopyable">trivially-copyable</a> and conform to
//! <a href="https://en.cppreference.com/w/cpp/named_req/StandardLayoutType">StandardLayoutType</a>.
//! @param ... The Type to check
#define CARB_ASSERT_INTEROP_SAFE(...) \
static_assert(std::is_standard_layout<__VA_ARGS__>::value, "Must have standard layout to be interop safe"); \
static_assert(std::is_trivially_copyable<__VA_ARGS__>::value, "Must be trivially copyable to be interop safe")
namespace carb
{
/**
* Defines a version consisting of a major and minor version.
*/
struct Version
{
uint32_t major; //!< The major version.
uint32_t minor; //!< The minor version.
};
CARB_ASSERT_INTEROP_SAFE(Version);
/**
* Less-than comparison operator.
*
* Compares two versions and reports true if the left version is lower than the right.
*
* @note The major and minor versions are compared independently. While the \a number `1.11` is less than the \a number
* `1.9`, \a version `1.11` is considered to be higher, so `Version{ 1, 9 } < Version{ 1, 11 }` would be `true`.
* @param lhs The version on the left side of the operation
* @param rhs The version on the right side of the operation
* @returns `true` if \p lhs is a lower version than \p rhs; `false` otherwise.
*/
constexpr bool operator<(const Version& lhs, const Version& rhs) noexcept
{
if (lhs.major == rhs.major)
{
return lhs.minor < rhs.minor;
}
return lhs.major < rhs.major;
}
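// Illustrative check (not part of the original header): components compare numerically,
// so version 1.9 precedes version 1.11 even though 1.11 < 1.9 as a decimal number.
//
//   static_assert(carb::Version{ 1, 9 } < carb::Version{ 1, 11 }, "1.9 precedes 1.11");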
/**
* Less-than-or-equal comparison operator.
*
* Compares two versions and reports true if the left version is lower than or equal to the right.
*
* @note The major and minor versions are compared independently. While the \a number `1.11` is less than the \a number
* `1.9`, \a version `1.11` is considered to be higher, so `Version{ 1, 9 } <= Version{ 1, 11 }` would be `true`.
* @param lhs The version on the left side of the operation
* @param rhs The version on the right side of the operation
* @returns `true` if \p lhs is a version that is lower than or equal to \p rhs; `false` otherwise.
*/
constexpr bool operator<=(const Version& lhs, const Version& rhs) noexcept
{
if (lhs.major == rhs.major)
{
return lhs.minor <= rhs.minor;
}
return lhs.major < rhs.major;
}
/**
* Equality operator.
*
* Compares two versions and reports true if the left version and the right version are equal.
*
* @param lhs The version on the left side of the operation
* @param rhs The version on the right side of the operation
* @returns `true` if \p lhs is equal to \p rhs; `false` otherwise.
*/
constexpr bool operator==(const Version& lhs, const Version& rhs) noexcept
{
return lhs.major == rhs.major && lhs.minor == rhs.minor;
}
/**
* Inequality operator.
*
* Compares two versions and reports true if the left version and the right version are not equal.
*
* @param lhs The version on the left side of the operation
* @param rhs The version on the right side of the operation
* @returns `true` if \p lhs is not equal to \p rhs; `false` otherwise.
*/
constexpr bool operator!=(const Version& lhs, const Version& rhs) noexcept
{
return !(lhs == rhs);
}
/**
* Checks two versions to see if they are semantically compatible.
*
* For more information on semantic versioning, see https://semver.org/.
*
 * @warning A major version of `0` is considered to be the "development/experimental" version, and `0.x` minor versions
 * may be but are not required to be compatible with each other. This function will consider \p minimum version `0.x`
 * to be semantically compatible with a different \p candidate version `0.y`, but will emit a warning to `stderr` if a
 * \p name is provided.
*
* @param name An optional name that, if provided, will enable the warning message to `stderr` for `0.x` versions
* mentioned above.
* @param minimum The minimum version required. This is typically the version being tested.
* @param candidate The version offered. This is typically the version being tested against.
* @retval true If \p minimum and \p candidate share the same major version and \p candidate has a minor version that is
* greater-than or equal to the minor version in \p minimum.
* @retval false If \p minimum and \p candidate have different major versions or \p candidate has a minor version that
* is lower than the minor version requested in \p minimum.
*/
inline bool isVersionSemanticallyCompatible(const char* name, const Version& minimum, const Version& candidate)
{
if (minimum.major != candidate.major)
{
return false;
}
else if (minimum.major == 0)
{
        // Need to special-case when the major version is equal but zero: per semver
        // (see http://semver.org), 0.x releases make no compatibility guarantees, so any
        // difference in minor version may make them incompatible.
        // In the case of version 0.x (major of 0), we only "warn" the user of possible
        // incompatibility when a user asks for 0.x and we have an implementation 0.y (where y > x).
        // See https://nvidia-omniverse.atlassian.net/browse/CC-249
if (minimum.minor > candidate.minor)
{
return false;
}
else if (minimum.minor < candidate.minor && name)
{
            // Using CARB_LOG here may be pointless, as logging may not be set up yet; write to stderr instead.
fprintf(stderr,
"Warning: Possible version incompatibility. Attempting to load %s with version v%" PRIu32
".%" PRIu32 " against v%" PRIu32 ".%" PRIu32 ".\n",
name, candidate.major, candidate.minor, minimum.major, minimum.minor);
}
}
else if (minimum.minor > candidate.minor)
{
return false;
}
return true;
}
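// Illustrative results derived from the logic above (the interface name "IFoo" is hypothetical):
//
//   carb::isVersionSemanticallyCompatible("IFoo", { 1, 2 }, { 1, 5 }); // true: same major, newer minor
//   carb::isVersionSemanticallyCompatible("IFoo", { 1, 5 }, { 1, 2 }); // false: candidate minor too low
//   carb::isVersionSemanticallyCompatible("IFoo", { 1, 0 }, { 2, 0 }); // false: major versions differ
//   carb::isVersionSemanticallyCompatible("IFoo", { 0, 1 }, { 0, 2 }); // true, but warns on stderr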
} // namespace carb
| 6,741 |
C
| 39.614458 | 120 | 0.680463 |