omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnce.cpp

// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnOnceDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnOnce
{
bool m_open {false};
public:
static bool compute(OgnOnceDatabase& db)
{
auto iActionGraph = getInterface();
auto& state = db.internalState<OgnOnce>();
bool isReset = iActionGraph->getExecutionEnabled(inputs::reset.token(), db.getInstanceIndex());
if (isReset)
{
state.m_open = false;
return true;
}
if (!state.m_open)
{
state.m_open = true;
iActionGraph->setExecutionEnabled(outputs::once.token(), db.getInstanceIndex());
}
else
{
iActionGraph->setExecutionEnabled(outputs::after.token(), db.getInstanceIndex());
}
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
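
# Example (hedged sketch, not from the sources above): one way to wire a Once
# node from Python with og.Controller. The node type names and the
# "inputs:execIn" port are assumptions based on this extension's layout and
# are not verified against the .ogn definitions shown here.
import omni.graph.core as og

og.Controller.edit(
    {"graph_path": "/World/ActionGraph", "evaluator_name": "execution"},
    {
        og.Controller.Keys.CREATE_NODES: [
            ("tick", "omni.graph.action.OnPlaybackTick"),
            ("once", "omni.graph.action.Once"),
        ],
        # The first tick fires outputs:once; every later tick fires outputs:after
        og.Controller.Keys.CONNECT: [("tick.outputs:tick", "once.inputs:execIn")],
    },
)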

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnStageEvent.py

"""
This is the implementation of the OGN node defined in OgnOnStageEvent.ogn
"""
from contextlib import suppress
import carb
import omni.graph.core as og
import omni.kit.app
import omni.usd
from omni.graph.action import get_interface
from omni.graph.action.ogn.OgnOnStageEventDatabase import OgnOnStageEventDatabase
# global map of the token to python enum, should match the allowedTokens
event_name_to_enum = {
"Assets Loaded": omni.usd.StageEventType.ASSETS_LOADED,
"Assets Load Aborted": omni.usd.StageEventType.ASSETS_LOAD_ABORTED,
"Closed": omni.usd.StageEventType.CLOSED,
"Closing": omni.usd.StageEventType.CLOSING,
"Gizmo Tracking Changed": omni.usd.StageEventType.GIZMO_TRACKING_CHANGED,
"MDL Param Loaded": omni.usd.StageEventType.MDL_PARAM_LOADED,
"Opened": omni.usd.StageEventType.OPENED,
"Open Failed": omni.usd.StageEventType.OPEN_FAILED,
"Saved": omni.usd.StageEventType.SAVED,
"Save Failed": omni.usd.StageEventType.SAVE_FAILED,
"Selection Changed": omni.usd.StageEventType.SELECTION_CHANGED,
"Hierarchy Changed": omni.usd.StageEventType.HIERARCHY_CHANGED,
"Settings Loaded": omni.usd.StageEventType.SETTINGS_LOADED,
"Settings Saving": omni.usd.StageEventType.SETTINGS_SAVING,
"OmniGraph Start Play": omni.usd.StageEventType.OMNIGRAPH_START_PLAY,
"OmniGraph Stop Play": omni.usd.StageEventType.OMNIGRAPH_STOP_PLAY,
"Simulation Start Play": omni.usd.StageEventType.SIMULATION_START_PLAY,
"Simulation Stop Play": omni.usd.StageEventType.SIMULATION_STOP_PLAY,
"Animation Start Play": omni.usd.StageEventType.ANIMATION_START_PLAY,
"Animation Stop Play": omni.usd.StageEventType.ANIMATION_STOP_PLAY,
}
class OgnOnStageEventInternalState:
"""Convenience class for maintaining per-node state information"""
def __init__(self):
"""Instantiate the per-node state information."""
        # This subscription object controls the lifetime of our callback; we will clean it up upon release
self.sub = None
# Set when the callback has triggered
self.is_set = False
# The last payload received
self.payload: carb.dictionary.Item = None
# The event_name we used to subscribe
self.sub_event_type = ""
# The node instance handle
self.node = None
# Counter to determine when an animation pause was detected
self._was_paused = 0
# cache of the int-valued event types
self._stop_play_events = [
int(e)
for e in (
omni.usd.StageEventType.OMNIGRAPH_STOP_PLAY,
omni.usd.StageEventType.SIMULATION_STOP_PLAY,
omni.usd.StageEventType.ANIMATION_STOP_PLAY,
)
]
self._start_play_events = [
int(e)
for e in (
omni.usd.StageEventType.OMNIGRAPH_START_PLAY,
omni.usd.StageEventType.SIMULATION_START_PLAY,
omni.usd.StageEventType.ANIMATION_START_PLAY,
)
]
self._timeline = omni.timeline.get_timeline_interface()
def _on_stage_event(self, e: carb.events.IEvent):
"""The event callback"""
if e is None:
return
# Maintain book-keeping for pause/resume so we can ignore them
if e.type in self._stop_play_events:
is_stopped = self._timeline.is_stopped()
if not is_stopped:
# This is actually a PAUSE, because the timeline is not stopped
self._was_paused += 1
return
elif (e.type in self._start_play_events) and (self._was_paused > 0):
# This is actually an UNPAUSE, because we previously detected a PAUSE
self._was_paused -= 1
return
if e.type != int(self.sub_event_type):
return
self.is_set = True
self.payload = e.payload
# Tell the evaluator we need to be computed
if self.node.is_valid():
self.node.request_compute()
def first_time_subscribe(self, node: og.Node, event_type: omni.usd.StageEventType) -> bool:
"""Checked call to set up carb subscription
Args:
node: The node instance
event_type: The stage event type
Returns:
True if we subscribed, False if we are already subscribed
"""
if self.sub is not None and self.sub_event_type != event_type:
# event name changed since we last subscribed, unsubscribe
self.sub.unsubscribe()
self.sub = None
if self.sub is None:
# Add a subscription for the given event type. This is a pop subscription, so we expect a 1-frame
# lag between send and receive
self.sub = (
omni.usd.get_context()
.get_stage_event_stream()
.create_subscription_to_pop(
self._on_stage_event, name=f"omni.graph.action.__onstageevent.{node.node_id()}"
)
)
self.sub_event_type = event_type
self.node = node
return True
return False
def try_pop_event(self):
"""Pop the payload of the last event received, or None if there is no event to pop"""
if self.is_set:
self.is_set = False
payload = self.payload
self.payload = None
return payload
return None
# ======================================================================
class OgnOnStageEvent:
@staticmethod
def internal_state():
"""Returns an object that will contain per-node state information"""
return OgnOnStageEventInternalState()
@staticmethod
def release(node):
# Unsubscribe right away instead of waiting for GC cleanup, we don't want our callback firing
# after the node has been released.
with suppress(og.OmniGraphError):
state = OgnOnStageEventDatabase.per_node_internal_state(node)
if state.sub:
state.sub.unsubscribe()
state.sub = None
@staticmethod
def compute(db) -> bool:
event_name = db.inputs.eventName
if not event_name:
return True
state = db.internal_state
# Check the validity of the input
try:
event_type = event_name_to_enum[event_name]
except KeyError:
db.log_error(f"{event_name} is not a recognized Stage Event")
return False
if state.first_time_subscribe(db.node, event_type):
return True
payload = state.try_pop_event()
# Drop events if we are disabled, unless we are a 'stop' event - this is a special case because STOP only comes
# after we are no longer playing.
if (
db.inputs.onlyPlayback
and (not db.node.get_graph().get_default_graph_context().get_is_playing())
and (
event_type
not in (
omni.usd.StageEventType.OMNIGRAPH_STOP_PLAY,
omni.usd.StageEventType.SIMULATION_STOP_PLAY,
omni.usd.StageEventType.ANIMATION_STOP_PLAY,
)
)
):
return True
if payload is None:
return True
get_interface().set_execution_enabled("outputs:execOut")
return True
# ----------------------------------------------------------------------------
@staticmethod
def update_node_version(context: og.GraphContext, node: og.Node, old_version: int, new_version: int):
if old_version < new_version and old_version < 2:
# We added inputs:onlyPlayback default true - to maintain previous behavior we should set this to false
node.create_attribute(
"inputs:onlyPlayback",
og.Type(og.BaseDataType.BOOL),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT,
False,
)
return True
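
# Example (hedged sketch, not from the sources above): the subscription
# pattern this node uses, reduced to a standalone script. The APIs shown
# (omni.usd.get_context, get_stage_event_stream, create_subscription_to_pop)
# are the same ones used in first_time_subscribe above.
import carb.events
import omni.usd

def _on_stage_event(e: carb.events.IEvent):
    # Stage event types compare against the int value of the enum
    if e.type == int(omni.usd.StageEventType.OPENED):
        print("Stage opened")

sub = (
    omni.usd.get_context()
    .get_stage_event_stream()
    .create_subscription_to_pop(_on_stage_event, name="my.ext.stage_listener")
)
# The subscription object controls the callback lifetime; release it when done
sub.unsubscribe()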

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnMouseInput.cpp

// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include "ActionNodeCommon.h"
#include <carb/input/IInput.h>
#include <carb/input/InputTypes.h>
#include <omni/graph/action/IActionGraph.h>
#include <omni/kit/IAppWindow.h>
#include <OgnOnMouseInputDatabase.h>
#include <thread>
using namespace carb::input;
namespace omni
{
namespace graph
{
namespace action
{
// Three buttons, normalized or absolute movements and scroll
constexpr size_t s_numNames = 6;
static std::array<NameToken, s_numNames> s_elementTokens;
class OgnOnMouseInput
{
public:
SubscriptionId m_mouseEventSubsId{ 0 };
MouseEvent m_mouseEvent;
exec::unstable::Stamp m_elementSetStamp; // The stamp set by the authoring node when the event occurs
exec::unstable::SyncStamp m_elementSetSyncStamp; // The stamp used by each instance
static bool onMouseEvent(const InputEvent& e, void* userData)
{
if (e.deviceType != DeviceType::eMouse)
{
return false;
}
NodeHandle nodeHandle = reinterpret_cast<NodeHandle>(userData);
auto iNode = carb::getCachedInterface<omni::graph::core::INode>();
NodeObj nodeObj = iNode->getNodeFromHandle(nodeHandle);
if (!nodeObj.isValid())
return false;
auto& authoringState = OgnOnMouseInputDatabase::sInternalState<OgnOnMouseInput>(nodeObj, kAuthoringGraphIndex);
authoringState.m_elementSetStamp.next();
authoringState.m_mouseEvent = e.mouseEvent;
iNode->requestCompute(nodeObj);
return true;
}
static void initialize(const GraphContextObj& context, const NodeObj& nodeObj)
{
// First time initialization will fill up the token array
[[maybe_unused]] static bool callOnce = ([]
{ s_elementTokens = {
OgnOnMouseInputDatabase::tokens.LeftButton,
OgnOnMouseInputDatabase::tokens.MiddleButton,
OgnOnMouseInputDatabase::tokens.RightButton,
OgnOnMouseInputDatabase::tokens.NormalizedMove,
OgnOnMouseInputDatabase::tokens.PixelMove,
OgnOnMouseInputDatabase::tokens.Scroll
};
} (), true);
omni::kit::IAppWindow* appWindow = omni::kit::getDefaultAppWindow();
if (!appWindow)
return;
        // TODO: We may want to get the mouse from the camera instead of the appWindow.
Mouse* mouse = appWindow->getMouse();
if (!mouse)
return;
IInput* input = carb::getCachedInterface<IInput>();
if (!input)
return;
auto& authoringState = OgnOnMouseInputDatabase::sInternalState<OgnOnMouseInput>(nodeObj, kAuthoringGraphIndex);
        // Other subscribers may consume the mouse event, so we cannot subscribe as the last one.
// TODO: Is it good to subscribe this to the front?
authoringState.m_mouseEventSubsId =
input->subscribeToInputEvents((carb::input::InputDevice*)mouse, kEventTypeAll, onMouseEvent,
reinterpret_cast<void*>(nodeObj.nodeHandle), kSubscriptionOrderFirst);
}
static void release(const NodeObj& nodeObj)
{
IInput* input = carb::getCachedInterface<IInput>();
if (!input)
return;
auto const& authoringState = OgnOnMouseInputDatabase::sInternalState<OgnOnMouseInput>(nodeObj, kAuthoringGraphIndex);
if (authoringState.m_mouseEventSubsId > 0)
{
input->unsubscribeToInputEvents(authoringState.m_mouseEventSubsId);
}
}
static bool compute(OgnOnMouseInputDatabase& db)
{
if (checkNodeDisabledForOnlyPlay(db))
return true;
auto const& authoringState =
OgnOnMouseInputDatabase::sInternalState<OgnOnMouseInput>(db.abi_node(), kAuthoringGraphIndex);
auto& localState = db.internalState<OgnOnMouseInput>();
if (localState.m_elementSetSyncStamp.makeSync(authoringState.m_elementSetStamp))
{
NameToken const& elementIn = db.inputs.mouseElement();
size_t const eventIndex = static_cast<size_t>(authoringState.m_mouseEvent.type);
bool eventMatched = false;
if (eventIndex < 6) // Left/Middle/Right Buttons
{
eventMatched = s_elementTokens[eventIndex / 2] == elementIn;
}
else if (eventIndex == 6) // Move
{
eventMatched = elementIn == db.tokens.NormalizedMove || elementIn == db.tokens.PixelMove;
}
else if (eventIndex == 7) // Scroll
{
eventMatched = elementIn == db.tokens.Scroll;
}
else
{
db.logError("Invalid Input Event %zu detected", eventIndex);
}
float* deltaValue = db.outputs.value().data();
carb::Float2 eventValue{ 0.0f, 0.0f };
auto iActionGraph = getInterface();
if (eventMatched)
{
switch (authoringState.m_mouseEvent.type)
{
case MouseEventType::eLeftButtonDown:
case MouseEventType::eMiddleButtonDown:
case MouseEventType::eRightButtonDown:
db.outputs.isPressed() = true;
iActionGraph->setExecutionEnabled(outputs::pressed.token(), db.getInstanceIndex());
break;
case MouseEventType::eLeftButtonUp:
case MouseEventType::eMiddleButtonUp:
case MouseEventType::eRightButtonUp:
db.outputs.isPressed() = false;
iActionGraph->setExecutionEnabled(outputs::released.token(), db.getInstanceIndex());
break;
case MouseEventType::eMove:
db.outputs.isPressed() = false;
iActionGraph->setExecutionEnabled(outputs::valueChanged.token(), db.getInstanceIndex());
if (elementIn == db.tokens.NormalizedMove)
{
eventValue = authoringState.m_mouseEvent.normalizedCoords;
}
else
{
eventValue = authoringState.m_mouseEvent.pixelCoords;
}
break;
case MouseEventType::eScroll:
db.outputs.isPressed() = false;
iActionGraph->setExecutionEnabled(outputs::valueChanged.token(), db.getInstanceIndex());
eventValue = authoringState.m_mouseEvent.scrollDelta;
break;
default:
break;
}
}
deltaValue[0] = eventValue.x;
deltaValue[1] = eventValue.y;
}
return true;
}
};
REGISTER_OGN_NODE()
} // namespace action
} // namespace graph
} // namespace omni

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnSendCustomEvent.py

"""
This is the implementation of the OGN node defined in OgnSendCustomEvent.ogn
"""
import codecs
import pickle
import carb.events
import omni.kit.app
from omni.graph.action import get_interface
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
n = "omni.graph.action." + event_name
return carb.events.type_from_string(n)
payload_path = "!path"
# ======================================================================
class OgnSendCustomEvent:
"""
    This node sends the given payload over the message bus using the specified custom event name
"""
@staticmethod
def compute(db) -> bool:
"""Compute the outputs from the current input"""
event_name = db.inputs.eventName
if not event_name:
return True
path = db.inputs.path
input_bundle = db.inputs.bundle
reg_event_name = registered_event_name(event_name)
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
payload = {}
if input_bundle.valid:
# Copy the contents of the input bundle into the event dict
for attr in input_bundle.attributes:
tp = attr.type
arg_obj = (tp, attr.value)
# Since we are python at both ends, easiest to pickle the attrib values so we
# can re-animate them on the other side
as_str = codecs.encode(pickle.dumps(arg_obj), "base64").decode()
payload[attr.name] = as_str
if path:
payload[payload_path] = path
message_bus.push(reg_event_name, payload=payload)
get_interface().set_execution_enabled("outputs:execOut")
return True
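
# Example (hedged sketch, not from the sources above): the encode/decode
# round trip used between OgnSendCustomEvent (above) and OgnOnCustomEvent.
# It relies on og.Type being picklable, which the node above also assumes.
import codecs
import pickle

import omni.graph.core as og

arg_obj = (og.Type(og.BaseDataType.FLOAT, 3, 0), (1.0, 2.0, 3.0))
as_str = codecs.encode(pickle.dumps(arg_obj), "base64").decode()  # sender side
attr_type, attr_value = pickle.loads(codecs.decode(as_str.encode(), "base64"))  # receiver side
assert attr_value == (1.0, 2.0, 3.0)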

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnGate.cpp

// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnGateDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
enum GateState
{
kGateStateClosed = 0,
kGateStateOpen = 1,
kGateStateUninitialized = 2,
};
class OgnGate
{
public:
int m_gate{ kGateStateUninitialized };
static bool compute(OgnGateDatabase& db)
{
auto iActionGraph = getInterface();
auto& state = db.internalState<OgnGate>();
// On our first ever compute call, initialize the gate state.
if (state.m_gate == kGateStateUninitialized)
{
state.m_gate = db.inputs.startClosed() ? kGateStateClosed : kGateStateOpen;
}
// In compute one of toggle or enter will be enabled
bool isToggle = iActionGraph->getExecutionEnabled(inputs::toggle.token(), db.getInstanceIndex());
if (isToggle)
{
// toggle the state
state.m_gate = (state.m_gate + 1) % 2;
}
else
{
if (state.m_gate == kGateStateOpen)
iActionGraph->setExecutionEnabled(outputs::exit.token(), db.getInstanceIndex());
}
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
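
# Example (hedged sketch, not from the sources above): wiring a Gate between
# a tick source and its downstream effect. "inputs:enter", "inputs:toggle"
# and "inputs:startClosed" are taken from the code above; the node type names
# are assumptions based on this extension's layout.
import omni.graph.core as og

og.Controller.edit(
    {"graph_path": "/World/ActionGraph", "evaluator_name": "execution"},
    {
        og.Controller.Keys.CREATE_NODES: [
            ("tick", "omni.graph.action.OnPlaybackTick"),
            ("gate", "omni.graph.action.Gate"),
        ],
        og.Controller.Keys.CONNECT: [("tick.outputs:tick", "gate.inputs:enter")],
        # Start closed; a pulse into gate.inputs:toggle opens/closes it
        og.Controller.Keys.SET_VALUES: [("gate.inputs:startClosed", True)],
    },
)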

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnCustomEvent.py

"""
This is the implementation of the OGN node defined in OgnOnCustomEvent.ogn
"""
import codecs
import pickle
from contextlib import suppress
import carb.events
import omni.graph.core as og
import omni.kit.app
from omni.graph.action import get_interface
from omni.graph.action.ogn.OgnOnCustomEventDatabase import OgnOnCustomEventDatabase
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
n = "omni.graph.action." + event_name
return carb.events.type_from_string(n)
payload_path = "!path"
class OgnOnCustomEventInternalState:
"""Convenience class for maintaining per-node state information"""
def __init__(self):
"""Instantiate the per-node state information."""
        # This subscription object controls the lifetime of our callback; it will be
        # cleaned up automatically when our node is destroyed
self.sub = None
# Set when the callback has triggered
self.is_set = False
# The last payload received
self.payload = None
# The event_name we used to subscribe
self.sub_event_name = ""
# The node instance handle
self.node = None
def on_event(self, custom_event):
"""The event callback"""
if custom_event is None:
return
self.is_set = True
self.payload = custom_event.payload
# Tell the evaluator we need to be computed
if self.node.is_valid():
self.node.request_compute()
def first_time_subscribe(self, node: og.Node, event_name: str) -> bool:
"""Checked call to set up carb subscription
Args:
node: The node instance
event_name: The name of the carb event
Returns:
True if we subscribed, False if we are already subscribed
"""
if self.sub is not None and self.sub_event_name != event_name:
# event name changed since we last subscribed, unsubscribe
self.sub.unsubscribe()
self.sub = None
if self.sub is None:
# Add a subscription for the given event name. This is a pop subscription, so we expect a 1-frame
# lag between send and receive
reg_event_name = registered_event_name(event_name)
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
self.sub = message_bus.create_subscription_to_pop_by_type(reg_event_name, self.on_event)
self.sub_event_name = event_name
self.node = node
return True
return False
def try_pop_event(self):
"""Pop the payload of the last event received, or None if there is no event to pop"""
if self.is_set:
self.is_set = False
payload = self.payload
self.payload = None
return payload
return None
# ======================================================================
class OgnOnCustomEvent:
"""
This node triggers when the specified message bus event is received
"""
@staticmethod
def internal_state():
"""Returns an object that will contain per-node state information"""
return OgnOnCustomEventInternalState()
@staticmethod
def compute(db) -> bool:
event_name = db.inputs.eventName
if not event_name:
return True
state = db.internal_state
if state.first_time_subscribe(db.node, event_name):
return True
# Drop events if we are disabled
if db.inputs.onlyPlayback and (not db.node.get_graph().get_default_graph_context().get_is_playing()):
state.try_pop_event()
return True
payload = state.try_pop_event()
if payload is None:
return True
# Copy the event dict contents into the output bundle
db.outputs.bundle.clear()
for name in payload.get_keys():
# Special 'path' entry gets copied to output attrib
if name == payload_path:
db.outputs.path = payload[name]
continue
as_str = payload[name]
arg_obj = pickle.loads(codecs.decode(as_str.encode(), "base64"))
attr_type, attr_value = arg_obj
new_attr = db.outputs.bundle.insert((attr_type, name))
new_attr.value = attr_value
get_interface().set_execution_enabled("outputs:execOut")
return True
# ----------------------------------------------------------------------------
@staticmethod
def release(node):
# Unsubscribe right away instead of waiting for GC cleanup, we don't want our callback firing
# after the node has been released.
with suppress(og.OmniGraphError):
state = OgnOnCustomEventDatabase.per_node_internal_state(node)
if state.sub:
state.sub.unsubscribe()
state.sub = None
# ----------------------------------------------------------------------------
@staticmethod
def update_node_version(context: og.GraphContext, node: og.Node, old_version: int, new_version: int):
if old_version < new_version and old_version < 2:
# We added inputs:onlyPlayback default true - to maintain previous behavior we should set this to false
node.create_attribute(
"inputs:onlyPlayback",
og.Type(og.BaseDataType.BOOL),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT,
False,
)
return True
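
# Example (hedged sketch, not from the sources above): triggering this node
# from a script. The "omni.graph.action." prefix and the "!path" payload key
# come from registered_event_name() and payload_path above; "MyEvent" is a
# hypothetical event name. Any other payload entries must be pickled the way
# OgnSendCustomEvent does, or the decode loop above will fail.
import carb.events
import omni.kit.app

reg_event = carb.events.type_from_string("omni.graph.action." + "MyEvent")
bus = omni.kit.app.get_app().get_message_bus_event_stream()
bus.push(reg_event, payload={"!path": "/World/SomePrim"})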

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnDelay.cpp

// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnDelayDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
namespace
{
constexpr double kUninitializedStartTime = -1.;
}
class OgnDelay
{
double m_startTime{ kUninitializedStartTime }; // The value of the context time when we started latent state
public:
static bool compute(OgnDelayDatabase& db)
{
auto iActionGraph = getInterface();
const auto& contextObj = db.abi_context();
auto iContext = contextObj.iContext;
auto& state = db.internalState<OgnDelay>();
double startTime = state.m_startTime;
double now = iContext->getTimeSinceStart(contextObj);
if (state.m_startTime > kUninitializedStartTime)
{
// We are being polled, check if we have slept long enough
double duration = db.inputs.duration();
duration = std::max(duration, 0.);
if (now - startTime >= duration)
{
state.m_startTime = kUninitializedStartTime;
iActionGraph->endLatentState(db.getInstanceIndex());
iActionGraph->setExecutionEnabled(outputs::finished.token(), db.getInstanceIndex());
return true;
}
}
else
{
// This is the first entry, start sleeping
state.m_startTime = now;
iActionGraph->startLatentState(db.getInstanceIndex());
return true;
}
// still sleeping
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
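
# Example (hedged sketch, not from the sources above): the same latent-state
# handshake expressed as a Python OGN node, in the style of OgnTickN below.
# time.monotonic() stands in for the graph context clock used by the C++ node.
import time

import omni.graph.core as og

class OgnDelaySketchState:
    def __init__(self):
        self.start_time = -1.0  # mirrors kUninitializedStartTime above

class OgnDelaySketch:
    @staticmethod
    def internal_state():
        return OgnDelaySketchState()

    @staticmethod
    def compute(db) -> bool:
        state = db.internal_state
        now = time.monotonic()
        if state.start_time < 0:
            # First entry: record the start time and go latent
            state.start_time = now
            db.outputs.finished = og.ExecutionAttributeState.LATENT_PUSH
        elif now - state.start_time >= max(db.inputs.duration, 0.0):
            # Slept long enough: leave latent state and fire "finished"
            state.start_time = -1.0
            db.outputs.finished = og.ExecutionAttributeState.LATENT_FINISH
        else:
            db.outputs.finished = og.ExecutionAttributeState.DISABLED  # still sleeping
        return True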

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnAddPrimRelationship.cpp

// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include "OgnAddPrimRelationshipDatabase.h"
#include <omni/graph/core/PreUsdInclude.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/common.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usd/relationship.h>
#include <pxr/usd/usdUtils/stageCache.h>
#include <omni/graph/core/PostUsdInclude.h>
#include <algorithm>
namespace omni
{
namespace graph
{
namespace action
{
class OgnAddPrimRelationship
{
public:
// Add relationship data on a prim
static bool compute(OgnAddPrimRelationshipDatabase& db)
{
const auto& path = db.inputs.path();
const auto& target = db.inputs.target();
auto& isSuccessful = db.outputs.isSuccessful();
isSuccessful = false;
if (!pxr::SdfPath::IsValidPathString(path) || !pxr::SdfPath::IsValidPathString(target))
{
return false;
}
const char* relName = db.tokenToString(db.inputs.name());
if (!relName)
{
return false;
}
pxr::SdfPath primPath(path);
pxr::SdfPath targetPath(target);
long int stageId = db.abi_context().iContext->getStageId(db.abi_context());
        pxr::UsdStageRefPtr stage = pxr::UsdUtilsStageCache::Get().Find(pxr::UsdStageCache::Id::FromLongInt(stageId));
        if (!stage)
        {
            return false;
        }
pxr::UsdPrim prim = stage->GetPrimAtPath(primPath);
if (!prim)
{
return false;
}
if (pxr::UsdRelationship myRel = prim.CreateRelationship(pxr::TfToken(relName)))
{
isSuccessful = myRel.AddTarget(targetPath);
}
return isSuccessful;
}
};
REGISTER_OGN_NODE()
}
}
}
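
# Example (hedged sketch, not from the sources above): the same operation
# done directly with the USD Python API. The prim and target paths are
# hypothetical placeholders.
import omni.usd
from pxr import Sdf

stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
if prim:
    rel = prim.CreateRelationship("myRel")  # same call the node makes
    is_successful = rel.AddTarget(Sdf.Path("/World/Target"))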

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnPlaybackTick.cpp

// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnOnPlaybackTickDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnOnPlaybackTick
{
public:
static bool compute(OgnOnPlaybackTickDatabase& db)
{
const auto& contextObj = db.abi_context();
auto iContext = contextObj.iContext;
if (!iContext->getIsPlaying(contextObj))
{
return true;
}
db.outputs.time() = iContext->getTime(contextObj);
db.outputs.deltaSeconds() = iContext->getElapsedTime(contextObj);
db.outputs.frame() = iContext->getFrame(contextObj);
auto iActionGraph = getInterface();
iActionGraph->setExecutionEnabled(outputs::tick.token(), db.getInstanceIndex());
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnMultisequence.cpp

// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnMultisequenceDatabase.h>
#include <carb/extras/StringUtils.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnMultisequence
{
public:
static bool setExecutionEnabled(NodeObj nodeObj, const char* attribName, bool andPushed, InstanceIndex instIndex)
{
auto iNode = nodeObj.iNode;
if (!iNode->getAttributeExists(nodeObj, attribName))
return false;
AttributeObj attrObj = iNode->getAttribute(nodeObj, attribName);
auto iActionGraph = getInterface();
if (andPushed)
iActionGraph->setExecutionEnabledAndPushed(attrObj.iAttribute->getNameToken(attrObj), instIndex);
else
iActionGraph->setExecutionEnabled(attrObj.iAttribute->getNameToken(attrObj), instIndex);
return true;
}
// Which branch we should take next
uint32_t m_nextOutput {0};
static bool compute(OgnMultisequenceDatabase& db)
{
OgnMultisequence& state = db.internalState<OgnMultisequence>();
NodeObj nodeObj = db.abi_node();
// Set the execution values
uint32_t executionIndex = 0;
// lots of room to append digits to the output name
std::array<char, 32> outputName;
auto formatAttrName = [&outputName](uint32_t n) {
carb::extras::formatString(outputName.data(), outputName.size(), "outputs:output%d", n);
};
for (uint32_t i = 0;; ++i)
{
formatAttrName(i);
if (i == state.m_nextOutput &&
setExecutionEnabled(nodeObj, outputName.data(), true, db.getInstanceIndex()))
{
executionIndex = i;
}
else if (i != state.m_nextOutput && nodeObj.iNode->getAttributeExists(nodeObj, outputName.data()))
{
// keep looping while we are matching attributes (shouldn't be any holes in the sequence)
}
else
{
// Check for end of sequence
if (executionIndex == i - 1)
{
formatAttrName(executionIndex);
setExecutionEnabled(nodeObj, outputName.data(), false, db.getInstanceIndex());
state.m_nextOutput = 0;
}
else
{
++state.m_nextOutput;
}
break;
}
}
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnSetPrimActive.cpp

// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
#include "OgnSetPrimActiveDatabase.h"
#include <omni/graph/action/IActionGraph.h>
#include <omni/graph/core/PreUsdInclude.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usdUtils/stageCache.h>
#include <omni/graph/core/PostUsdInclude.h>
#include <omni/fabric/FabricUSD.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnSetPrimActive
{
public:
// ----------------------------------------------------------------------------
static bool compute(OgnSetPrimActiveDatabase& db)
{
PXR_NS::SdfPath sdfPath;
if(db.inputs.primTarget().size() == 0)
{
const auto& primPath = db.inputs.prim();
if (pxr::SdfPath::IsValidPathString(primPath))
{
sdfPath = pxr::SdfPath(primPath);
}
else
{
return true;
}
}
else
{
if(db.inputs.primTarget().size() > 1)
db.logWarning("Only one prim target is supported, the rest will be ignored");
sdfPath = omni::fabric::toSdfPath(db.inputs.primTarget()[0]);
}
// Find our stage
const GraphContextObj& context = db.abi_context();
long stageId = context.iContext->getStageId(context);
auto stage = pxr::UsdUtilsStageCache::Get().Find(pxr::UsdStageCache::Id::FromLongInt(stageId));
if (!stage)
{
db.logError("Could not find USD stage %ld", stageId);
return false;
}
pxr::UsdPrim targetPrim = stage->GetPrimAtPath(sdfPath);
if (!targetPrim)
{
db.logError("Could not find prim \"%s\" in USD stage", sdfPath.GetText());
return false;
}
bool ok = targetPrim.SetActive(db.inputs.active());
if (ok)
{
auto iActionGraph = getInterface();
iActionGraph->setExecutionEnabled(outputs::execOut.token(), db.getInstanceIndex());
return true;
}
db.logError("Failed to set %s active state", sdfPath.GetText());
return false;
}
};
REGISTER_OGN_NODE()
} // namespace action
} // namespace graph
} // namespace omni
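
# Example (hedged sketch, not from the sources above): the same effect from a
# script, resolving the stage through the USD context rather than the graph
# context's stage id. "/World/Cube" is a hypothetical path.
import omni.usd

stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
if prim:
    prim.SetActive(False)  # deactivates the prim (and its subtree)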

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnTickN.py

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from dataclasses import dataclass
import omni.graph.core as og
# Set True to enable verbose debugging prints to console
DEBUG_PRINT = False
# ==============================================================================================================
@dataclass
class OgnTickNState:
count: int
# --------------------------------------------------------------------------------------------------------------
class OgnTickN:
@staticmethod
def internal_state():
"""Returns an object that will contain per-node state information"""
return OgnTickNState(-1)
@staticmethod
def compute(db) -> bool:
def print_state(state_string: str):
print(f"{db.node.get_prim_path()} {state_string}")
count = db.internal_state.count
count += 1
if count == 0:
_ = DEBUG_PRINT and print_state("START")
finished, tick = og.ExecutionAttributeState.LATENT_PUSH, og.ExecutionAttributeState.DISABLED
else:
duration = db.inputs.duration
if duration < count:
# Finished
finished, tick = og.ExecutionAttributeState.LATENT_FINISH, og.ExecutionAttributeState.DISABLED
count = -1
_ = DEBUG_PRINT and print_state("FINISHED")
else:
# Still ticking
finished = og.ExecutionAttributeState.DISABLED
if (count % db.inputs.period) == 0: # noqa: S001
tick = og.ExecutionAttributeState.ENABLED
else:
tick = og.ExecutionAttributeState.DISABLED
_ = DEBUG_PRINT and print_state("TICK")
# Write outputs
db.outputs.finished = finished
db.outputs.tick = tick
db.internal_state.count = count
return True

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnCountdown.py

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.graph.core as og
# Set True to enable verbose debugging prints to console
DEBUG_PRINT = False
# ==============================================================================================================
class OgnCountdown:
@staticmethod
def initialize(_, node):
og.Controller.attribute("state:count", node).set(-1)
@staticmethod
def compute(db) -> bool:
def print_state(state_string: str):
print(f"{db.node.get_prim_path()} {state_string}")
count = db.state.count
duration = db.inputs.duration
count += 1
tick_value = 1
alpha = 0
if count == 0:
if DEBUG_PRINT:
print_state("START")
finished, tick = og.ExecutionAttributeState.LATENT_PUSH, og.ExecutionAttributeState.DISABLED
else:
if duration < count:
# Finished
finished, tick = og.ExecutionAttributeState.LATENT_FINISH, og.ExecutionAttributeState.DISABLED
tick_value = count - 1
alpha = 1.0
count = -1
if DEBUG_PRINT:
print_state("FINISHED")
else:
# Still ticking
finished = og.ExecutionAttributeState.DISABLED
tick_value = count
alpha = count / max(duration, 1)
period = db.inputs.period
            if (period == 0) or ((count % period) == 0):  # noqa: S001
tick = og.ExecutionAttributeState.ENABLED
else:
tick = og.ExecutionAttributeState.DISABLED
if DEBUG_PRINT:
print_state("TICK")
# Write outputs
db.outputs.finished = finished
db.outputs.tick = tick
db.state.count = count
db.outputs.alpha = alpha
db.outputs.tickValue = tick_value
return True

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnClosing.cpp

// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnOnClosingDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnOnClosing
{
public:
static bool compute(OgnOnClosingDatabase& db)
{
auto iActionGraph = getInterface();
// This node does nothing but trigger downstream when computed. When the execution evaluator receives the
// pre-detach event it will ensure that only OnClosing nodes will be evaluated before cleaning up the graph.
// It's not possible to encapsulate the closing logic of the evaluator in this node due to the
// special requirement that other event nodes be disabled, which only applies to this one case.
// (We don't want OnTick running during the closing).
iActionGraph->setExecutionEnabled(outputs::execOut.token(), db.getInstanceIndex());
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnMessageBusEvent.py

"""
This is the implementation of the OGN node defined in OgnOnMessageBusEvent.ogn
"""
from contextlib import suppress
from typing import Optional
import carb
import carb.dictionary
import carb.events
import numpy as np
import omni.graph.core as og
import omni.kit.app
from omni.graph.action import get_interface
from omni.graph.action.ogn.OgnOnMessageBusEventDatabase import OgnOnMessageBusEventDatabase
class OgnOnMessageBusEventInternalState:
"""Convenience class for maintaining per-node state information"""
def __init__(self):
        # This subscription object controls the lifetime of our callback; it will be
        # cleaned up automatically when our node is destroyed
self.sub: carb.events.ISubscription = None
# Set when the callback has triggered
self.is_set = False
# The last payload received
self.payload: carb.dictionary.Item = None
# The event_name we used to subscribe
self.sub_event_name = ""
# The node instance handle
self.node: og.Node = None
def on_event(self, custom_event: carb.events.IEvent):
if custom_event is None:
return
self.is_set = True
self.payload = custom_event.payload
# Tell the evaluator we need to be computed
if self.node.is_valid():
self.node.request_compute()
def first_time_subscribe(self, node: og.Node, event_name: str) -> bool:
"""Checked call to set up carb subscription
Args:
node: The node instance
event_name: The name of the carb event
Returns:
True if we subscribed, False if we are already subscribed
"""
if self.sub is not None and self.sub_event_name != event_name:
# event name changed since we last subscribed, unsubscribe
self.sub.unsubscribe()
self.sub = None
if self.sub is None:
# Add a subscription for the given event name. This is a pop subscription, so we expect a 1-frame
# lag between send and receive
reg_event_name = carb.events.type_from_string(event_name)
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
self.sub = message_bus.create_subscription_to_pop_by_type(reg_event_name, self.on_event)
self.sub_event_name = event_name
self.node = node
return True
return False
def try_pop_event(self) -> Optional[carb.dictionary.Item]:
"""Pop the payload of the last event received, or None if there is no event to pop"""
if self.is_set:
self.is_set = False
payload = self.payload
self.payload = None
return payload
return None
# ======================================================================
class OgnOnMessageBusEvent:
"""
This node triggers when the specified message bus event is received
"""
@staticmethod
def internal_state():
"""Returns an object that will contain per-node state information"""
return OgnOnMessageBusEventInternalState()
@staticmethod
def compute(db) -> bool:
event_name = db.inputs.eventName
if not event_name:
return True
state = db.internal_state
if state.first_time_subscribe(db.node, event_name):
return True
# Drop events if we are disabled
if db.inputs.onlyPlayback and (not db.node.get_graph().get_default_graph_context().get_is_playing()):
state.try_pop_event()
return True
payload = state.try_pop_event()
if payload is None:
return True
node: og.Node = db.node
# Copy the event dict contents into dynamic attributes if they exist
for key in payload.get_keys():
attr_name = f"outputs:{key}"
if not node.get_attribute_exists(attr_name):
continue
attrib = node.get_attribute(attr_name)
if attrib:
value = payload[key]
og_type: og.Type = attrib.get_resolved_type()
is_array = og_type.array_depth > 0
is_tuple = og_type.tuple_count > 1
is_matrix = is_tuple and (
og_type.role in (og.AttributeRole.FRAME, og.AttributeRole.MATRIX, og.AttributeRole.TRANSFORM)
)
if is_array:
if is_matrix:
dim = 2 if og_type.tuple_count == 4 else 3 if og_type.tuple_count == 9 else 4
value = np.array(value).reshape((-1, dim, dim))
elif is_tuple:
dim = og_type.tuple_count
value = np.array(value).reshape((-1, dim))
elif is_matrix:
dim = 2 if og_type.tuple_count == 4 else 3 if og_type.tuple_count == 9 else 4
value = np.array(value).reshape((dim, dim))
try:
og.Controller.set(attrib, value)
except TypeError as exc:
db.log_error(f"Failed to copy payload data {key} to attribute {attr_name}:\n{exc}")
return False
get_interface().set_execution_enabled("outputs:execOut")
return True
# ----------------------------------------------------------------------------
@staticmethod
def release(node):
# Unsubscribe right away instead of waiting for GC cleanup, we don't want our callback firing
# after the node has been released.
with suppress(og.OmniGraphError):
state = OgnOnMessageBusEventDatabase.per_node_internal_state(node)
if state.sub:
state.sub.unsubscribe()
state.sub = None
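
# Example (hedged sketch, not from the sources above): pushing an event whose
# payload keys match dynamic output attributes on an OnMessageBusEvent node.
# Assumed setup: the node's eventName is "my.event" and it has a dynamic
# "outputs:xform" resolved as a 4x4 matrix; matrices travel flattened and are
# reshaped by the compute above.
import carb.events
import omni.kit.app

event_type = carb.events.type_from_string("my.event")
bus = omni.kit.app.get_app().get_message_bus_event_stream()
bus.push(event_type, payload={"xform": [1.0, 0.0, 0.0, 0.0,
                                        0.0, 1.0, 0.0, 0.0,
                                        0.0, 0.0, 1.0, 0.0,
                                        0.0, 0.0, 0.0, 1.0]})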

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnImpulseEvent.cpp

// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnOnImpulseEventDatabase.h>
#include <omni/graph/action/IActionGraph.h>
#include "ActionNodeCommon.h"
namespace omni
{
namespace graph
{
namespace action
{
class OgnOnImpulseEvent
{
public:
// ----------------------------------------------------------------------------
// Called by OG when our state attrib changes.
static void onValueChanged(const AttributeObj& attrObj, const void* userData)
{
// state::enableImpulse has changed, so we need to compute ASAP
NodeObj nodeObj = attrObj.iAttribute->getNode(attrObj);
nodeObj.iNode->requestCompute(nodeObj);
}
// ----------------------------------------------------------------------------
static void initialize(const GraphContextObj& context, const NodeObj& nodeObj)
{
AttributeObj attribObj = nodeObj.iNode->getAttributeByToken(nodeObj, state::enableImpulse.m_token);
attribObj.iAttribute->registerValueChangedCallback(attribObj, onValueChanged, true);
}
// ----------------------------------------------------------------------------
static bool compute(OgnOnImpulseEventDatabase& db)
{
if (checkNodeDisabledForOnlyPlay(db))
return true;
bool enableImpulse = db.state.enableImpulse();
if (enableImpulse)
{
auto iActionGraph = getInterface();
iActionGraph->setExecutionEnabled(outputs::execOut.token(), db.getInstanceIndex());
db.state.enableImpulse() = false;
}
return true;
}
// ----------------------------------------------------------------------------
static bool updateNodeVersion(const GraphContextObj& context, const NodeObj& nodeObj, int oldVersion, int newVersion)
{
if (oldVersion < newVersion)
{
if (oldVersion < 2)
{
// We added inputs:onlyPlayback default true - to maintain previous behavior we should set this to false
const bool val{ false };
nodeObj.iNode->createAttribute(nodeObj, "inputs:onlyPlayback", Type(BaseDataType::eBool), &val, nullptr,
kAttributePortType_Input, kExtendedAttributeType_Regular, nullptr);
}
return true;
}
return false;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
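
# Example (hedged sketch, not from the sources above): firing the impulse
# from a script. Setting state:enableImpulse schedules a compute through the
# value-changed callback above, which then enables outputs:execOut. The node
# path is a hypothetical placeholder.
import omni.graph.core as og

node = og.Controller.node("/World/ActionGraph/on_impulse_event")
og.Controller.attribute("state:enableImpulse", node).set(True)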

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnCounter.cpp

// Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include "OgnCounterDatabase.h"
#include <omni/graph/action/IActionGraph.h>
namespace omni {
namespace graph {
namespace action {
class OgnCounter
{
public:
static bool compute(OgnCounterDatabase& db)
{
auto iActionGraph = getInterface();
bool isReset = iActionGraph->getExecutionEnabled(inputs::reset.token(), omni::graph::core::kAccordingToContextIndex);
if (isReset)
{
db.state.count() = 0;
}
else
{
db.state.count() = db.state.count() + 1;
}
db.outputs.count() = db.state.count();
iActionGraph->setExecutionEnabled(outputs::execOut.token(), db.getInstanceIndex());
return true;
}
};
REGISTER_OGN_NODE()
} // namespace action
} // namespace graph
} // namespace omni

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnForEach.cpp

// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnForEachDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni {
namespace graph {
namespace action {
class OgnForEach
{
public:
size_t m_arrayIndex {0};
static bool compute(OgnForEachDatabase& db)
{
auto iActionGraph = getInterface();
auto const& arrayIn = db.inputs.arrayIn();
size_t const arrayLen = arrayIn.size();
auto& state = db.internalState<OgnForEach>();
if (state.m_arrayIndex >= arrayLen) {
iActionGraph->setExecutionEnabled(outputs::finished.token(), db.getInstanceIndex());
state.m_arrayIndex = 0;
return true;
}
size_t currentIndex = state.m_arrayIndex++;
iActionGraph->setExecutionEnabledAndPushed(outputs::loopBody.token(), db.getInstanceIndex());
db.outputs.arrayIndex() = static_cast<int>(currentIndex);
ConstRawPtr arrayAttribPtr{nullptr};
size_t arraySize{0};
arrayIn.rawData(arrayAttribPtr, arraySize);
// arrayAttribPtr is a pointer to the attribute data, the data being an array of something,
// so we need to deref that pointer to get the pointer to the actual data.
ConstRawPtr arrayData = *(ConstRawPtr*)arrayAttribPtr;
auto& element = db.outputs.element();
// Ensure output attribute is writable
uint8_t* out{ nullptr };
void** outPtr = (void**)(&out);
auto hdl = element.abi_handle();
db.abi_context().iAttributeData->getDataW(outPtr, db.abi_context(), &hdl, 1);
        if (!out)
{
db.logError("Could not make writable output");
return false;
}
// Determine the size of the element to copy
const IAttributeType& iAttributeType = *carb::getCachedInterface<IAttributeType>();
Type const type = element.type();
size_t strideBytes = iAttributeType.baseDataSize(type) * type.componentCount;
size_t offsetBytes = currentIndex * strideBytes;
memcpy(out, arrayData + offsetBytes, strideBytes);
return true;
}
static void onConnectionTypeResolve(const NodeObj& node)
{
auto const arrayIn = node.iNode->getAttributeByToken(node, inputs::arrayIn.token());
auto const element = node.iNode->getAttributeByToken(node, outputs::element.token());
auto const arrayInType = arrayIn.iAttribute->getResolvedType(arrayIn);
auto const elementType = element.iAttribute->getResolvedType(element);
if (elementType.baseType == BaseDataType::eUnknown)
{
std::array<AttributeObj, 2> attrs { arrayIn, element };
std::array<uint8_t, 2> tupleCounts {
arrayInType.componentCount,
arrayInType.componentCount
};
            // The element type cannot be an array because we don't support arrays-of-arrays
            std::array<uint8_t, 2> arrayDepths {
                1,
                0
            };
            std::array<AttributeRole, 2> rolesBuf {
                arrayInType.role,
                // eUnknown lets the resolution copy the role from the coupled array attribute
                AttributeRole::eUnknown
            };
node.iNode->resolvePartiallyCoupledAttributes(node, attrs.data(), tupleCounts.data(),
arrayDepths.data(), rolesBuf.data(), attrs.size());
}
}
};
REGISTER_OGN_NODE()
} // namespace action
} // namespace graph
} // namespace omni

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnSwitchToken.cpp

// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnSwitchTokenDatabase.h>
#include <carb/events/EventsUtils.h>
#include <carb/extras/StringUtils.h>
#include <cstring>
#include <omni/graph/action/IActionGraph.h>
static constexpr size_t kMaxAttrNameLen{ 17 };
static constexpr char k3Dots[] = "...";
namespace omni
{
namespace graph
{
namespace action
{
using namespace omni::fabric;
using namespace carb::events;
class OgnSwitchToken
{
carb::ObjectPtr<ISubscription> m_nodeChangedSub;
// ----------------------------------------------------------------------------
static AttributeObj getCorrespondingOutputAttrib(NodeObj nodeObj, char const* branchName)
{
char buffer[32];
char const* suffix = branchName + std::strlen("inputs:branch");
(void) carb::extras::formatString(buffer, sizeof(buffer), "outputs:output%s", suffix);
AttributeObj outputAttrObj = nodeObj.iNode->getAttribute(nodeObj, buffer);
if (!outputAttrObj.isValid())
throw std::runtime_error(formatString("Could not find attribute %s", buffer));
return outputAttrObj;
}
// ----------------------------------------------------------------------------
// Return a shorter version of the given string with ... in the middle
static std::array<char, kMaxAttrNameLen + 1> ellipsisStr(char const* val, size_t valLen)
{
        // Number of characters kept on each side of the "..."
        constexpr size_t snipSize{ (kMaxAttrNameLen - 3) / 2 };
std::array<char, kMaxAttrNameLen + 1> uiLabel{};
auto writeIter = std::copy(val, val + snipSize, uiLabel.begin());
writeIter = std::copy(k3Dots, k3Dots + 3, writeIter);
std::copy(val + valLen - snipSize, val + valLen, writeIter);
return uiLabel;
}
// ----------------------------------------------------------------------------
static void onValueChanged(const AttributeObj& attrObj, const void* userData)
{
// Get the new value
NodeObj nodeObj = attrObj.iAttribute->getNode(attrObj);
if (nodeObj.nodeHandle == kInvalidNodeHandle)
return;
auto graphObj = nodeObj.iNode->getGraph(nodeObj);
if (!graphObj.isValid())
return;
auto context = graphObj.iGraph->getDefaultGraphContext(graphObj);
if (!context.isValid())
return;
std::string name = attrObj.iAttribute->getName(attrObj);
        if (name.size() < 14) // "inputs:branch" plus at least one suffix character
            return;
auto const* pBranchVal = getDataR<NameToken>(
context, attrObj.iAttribute->getConstAttributeDataHandle(attrObj, kAccordingToContextIndex));
if (!pBranchVal)
return;
Token branchTok{*pBranchVal};
if (!branchTok.size())
return;
// Set metadata on corresponding output attrib
try
{
AttributeObj outputAttrObj = getCorrespondingOutputAttrib(nodeObj, name.c_str());
char const* branchVal = branchTok.getText();
size_t const branchValLen = strlen(branchVal);
if (branchValLen > kMaxAttrNameLen)
{
// Too long - instead use middle-ellipsis
auto uiLabel = ellipsisStr(branchVal, branchValLen);
outputAttrObj.iAttribute->setMetadata(outputAttrObj, kOgnMetadataUiName, uiLabel.data());
}
else
outputAttrObj.iAttribute->setMetadata(outputAttrObj, kOgnMetadataUiName, branchTok.getText());
}
catch (std::exception const& ex)
{
nodeObj.iNode->logComputeMessageOnInstance(nodeObj, kAuthoringGraphIndex, ogn::Severity::eError, ex.what());
}
}
public:
// ----------------------------------------------------------------------------
static void initialize(const GraphContextObj& context, const NodeObj& nodeObj)
{
auto& state = OgnSwitchTokenDatabase::sInternalState<OgnSwitchToken>(nodeObj);
// Callback anytime an attribute is added to this node so we can monitor value changed
state.m_nodeChangedSub = carb::events::createSubscriptionToPop(
nodeObj.iNode->getEventStream(nodeObj).get(), [nodeObj](carb::events::IEvent* e) {
switch (static_cast<INodeEvent>(e->type))
{
case INodeEvent::eCreateAttribute:
{
carb::dictionary::IDictionary* iDict = carb::dictionary::getCachedDictionaryInterface();
auto name = iDict->get<char const*>(e->payload, "attribute");
if (name && std::strstr(name, "inputs:branch") == name)
{
AttributeObj attribObj = nodeObj.iNode->getAttribute(nodeObj, name);
if (attribObj.isValid())
{
attribObj.iAttribute->registerValueChangedCallback(attribObj, onValueChanged, false);
}
}
                    }
                    break;
default:
break;
}
});
// Hook up all the existing attributes to value changed callback
size_t nAttribs = nodeObj.iNode->getAttributeCount(nodeObj);
if (!nAttribs)
return;
std::vector<AttributeObj> allAttribs;
allAttribs.resize(nAttribs);
nodeObj.iNode->getAttributes(nodeObj, allAttribs.data(), nAttribs);
for (auto& attribObj : allAttribs)
{
char const* name = attribObj.iAttribute->getName(attribObj);
if (name && std::strstr(name, "inputs:branch") == name)
{
attribObj.iAttribute->registerValueChangedCallback(attribObj, onValueChanged, false);
}
}
}
// ----------------------------------------------------------------------------
static bool compute(OgnSwitchTokenDatabase& db)
{
NodeObj nodeObj = db.abi_node();
GraphContextObj context = db.abi_context();
NameToken const value = db.inputs.value();
auto iNode = nodeObj.iNode;
GraphObj graphObj = iNode->getGraph(nodeObj);
// Check which branch matches the input value
size_t nAttribs = nodeObj.iNode->getAttributeCount(nodeObj);
if (!nAttribs)
return false;
std::vector<AttributeObj> allAttribs;
allAttribs.resize(nAttribs);
nodeObj.iNode->getAttributes(nodeObj, allAttribs.data(), nAttribs);
for (auto& attribObj : allAttribs)
{
char const* name = attribObj.iAttribute->getName(attribObj);
if (name && std::strstr(name, "inputs:branch") == name)
{
auto const* pBranchVal = getDataR<NameToken>(context, attribObj.iAttribute->getConstAttributeDataHandle(attribObj, db.getInstanceIndex()));
if (value == *pBranchVal)
{
try
{
AttributeObj outputAttrObj = getCorrespondingOutputAttrib(nodeObj, name);
auto iActionGraph = getInterface();
iActionGraph->setExecutionEnabled(outputAttrObj.iAttribute->getNameToken(outputAttrObj), db.getInstanceIndex());
}
catch (std::exception const& ex)
{
db.logError(ex.what());
return false;
}
break;
}
}
}
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
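
# Example (hedged sketch, not from the sources above): the middle-ellipsis
# label logic of ellipsisStr() in plain Python, using the corrected snip size
# of (kMaxAttrNameLen - 3) / 2 characters on each side.
def ellipsis_str(val: str, max_len: int = 17) -> str:
    if len(val) <= max_len:
        return val
    snip = (max_len - 3) // 2
    return val[:snip] + "..." + val[-snip:]

assert ellipsis_str("a_very_long_branch_token") == "a_very_...h_token"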

omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnVariableChange.cpp

// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnOnVariableChangeDatabase.h>
#include <omni/graph/action/IActionGraph.h>
#include <omni/graph/core/ogn/string.h>
#include "ActionNodeCommon.h"
namespace omni
{
namespace graph
{
namespace action
{
using namespace omni::graph::core;
using omni::graph::core::ogn::VariableAttribute;
// unnamed namespace to avoid multiple declaration when linking
namespace {
bool compareAndCacheVariables(VariableAttribute var, std::vector<uint8_t>& cachedVariable, Type& cachedType)
{
if (!var.isValid())
return true;
RawPtr varValuePtr{ nullptr };
size_t size{ 0 };
if (cachedType.arrayDepth > 0)
{
// for arrays, raw data returns the pointer to the base address of the array and the size of the pointer
var.rawData(varValuePtr, size);
if (varValuePtr)
{
// var.size() is the number of elements.
size = var.size() * var.type().baseTypeSize();
varValuePtr = *reinterpret_cast<RawPtr*>(varValuePtr);
}
}
else
{
var.rawData(varValuePtr, size);
}
if (!varValuePtr)
size = 0;
    if ((cachedType != var.type()) || (size != cachedVariable.size()) ||
        (memcmp(cachedVariable.data(), varValuePtr, size) != 0))
    {
        cachedType = var.type();
        cachedVariable.resize(size);
        if (size)
            memcpy(cachedVariable.data(), varValuePtr, size);
        return false;
    }
return true;
}
} // namespace
class OgnOnVariableChange
{
std::vector<uint8_t> m_cachedVariable{};
Type m_cachedType{};
public:
// ----------------------------------------------------------------------------
static void initialize(GraphContextObj const& context, NodeObj const& nodeObj)
{
AttributeObj attrObj = nodeObj.iNode->getAttributeByToken(nodeObj, inputs::variableName.m_token);
attrObj.iAttribute->registerValueChangedCallback(attrObj, onValueChanged, true);
}
// ----------------------------------------------------------------------------
// Called by OG when the value of the variableName changes
static void onValueChanged(AttributeObj const& attrObj, void const* userData)
{
        auto nodeObj = attrObj.iAttribute->getNode(attrObj);
        if (nodeObj.nodeHandle == kInvalidNodeHandle)
            return;
        GraphObj graph = nodeObj.iNode->getGraph(nodeObj);
size_t instCount = graph.iGraph->getInstanceCount(graph);
        // An instance count of zero means the graph is not instanced
        if (instCount == 0)
{
auto& state = OgnOnVariableChangeDatabase::sInternalState<OgnOnVariableChange>(nodeObj, kAuthoringGraphIndex);
state.m_cachedVariable.clear();
state.m_cachedType = Type();
}
else
{
for (size_t idx = 0; idx < instCount; ++idx)
{
auto& state = OgnOnVariableChangeDatabase::sInternalState<OgnOnVariableChange>(nodeObj, {idx});
state.m_cachedVariable.clear();
state.m_cachedType = Type();
}
}
}
    // ----------------------------------------------------------------------------
    static bool compute(OgnOnVariableChangeDatabase& db)
{
if (checkNodeDisabledForOnlyPlay(db))
return true;
auto& state = db.internalState<OgnOnVariableChange>();
auto var = db.getVariable(db.inputs.variableName());
auto& cachedVariable = state.m_cachedVariable;
auto& cachedType = state.m_cachedType;
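        // The first compute after a reset only primes the cache; a change can be reported
        // only once a baseline value has been cached.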
bool isClean = (cachedVariable.size() != 0);
bool isSame = compareAndCacheVariables(var, cachedVariable, cachedType);
if (isClean && !isSame)
{
auto iActionGraph = getInterface();
iActionGraph->setExecutionEnabled(outputs::changed.token(), db.getInstanceIndex());
}
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
| 4,331 | C++ | 29.942857 | 122 | 0.620411 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnObjectChange.cpp | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// clang-format off
#include "UsdPCH.h"
// clang-format on
#include <OgnOnObjectChangeDatabase.h>
#include "ActionNodeCommon.h"
#include <omni/graph/action/IActionGraph.h>
#include <omni/fabric/FabricUSD.h>
#include <omni/graph/core/ArrayWrapper.h>
// clang-format off
#include <omni/usd/UsdContext.h>
// clang-format on
#include <mutex>
namespace omni
{
namespace graph
{
namespace action
{
using namespace omni::graph::core;
// The idea here is that each node instance registers as a notice listener when its compute is called, if the path
// input has changed or it hasn't registered before. The watched path needs to be verified on every compute because
// the input could change at any point. The listener must always be revoked in `release`. Any changes since the last
// compute() will trigger the output execution.
//
class UsdNoticeListener;
using UsdNoticeListenerRefPtr = pxr::TfRefPtr<UsdNoticeListener>;
using UsdNoticeListenerWeakPtr = pxr::TfWeakPtr<UsdNoticeListener>;
// Derive from Ref and Weak bases so that we don't crash during `release` (and we delete the listener) from a
// different thread than the notices are being sent on. Instead we just decref the listener so that the internal
// weak pointer will keep it alive until notice sending is completed
class UsdNoticeListener : public pxr::TfRefBase, public pxr::TfWeakBase
{
public:
enum class WatchType
{
eInvalid,
ePrim,
ePrimProperty
};
static UsdNoticeListenerRefPtr New(long stageId)
{
return pxr::TfCreateRefPtr(new UsdNoticeListener(stageId));
}
~UsdNoticeListener()
{
pxr::TfNotice::Revoke(m_registerKey);
}
void registerForPath(pxr::SdfPath path, UsdNoticeListenerRefPtr ptr)
{
// Ensure our dirty flag isn't still set
{
const std::lock_guard<std::mutex> lock(m_noticeMutex);
m_watchedPath = path;
m_isDirty = false;
m_noticedName = pxr::TfToken();
}
// If we are given an empty path we can revoke
if (path.IsEmpty())
{
if (m_registerKey.IsValid())
pxr::TfNotice::Revoke(m_registerKey);
return;
}
// Determine if the path actually exists on the stage
m_watchType = WatchType::eInvalid;
if (path.IsPrimPath())
{
pxr::UsdPrim prim = m_stage->GetPrimAtPath(path);
if (!prim)
return;
m_watchType = WatchType::ePrim;
}
else if (path.IsPrimPropertyPath())
{
pxr::UsdProperty prop = m_stage->GetPropertyAtPath(path);
if (!prop)
return;
m_watchType = WatchType::ePrimProperty;
}
// Register ourselves
if (!m_registerKey.IsValid())
m_registerKey = pxr::TfNotice::Register(UsdNoticeListenerWeakPtr(ptr), &UsdNoticeListener::Handle);
}
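    // TfNotice handler. Notices may arrive on a different thread than compute(), so the
    // dirty flag and the noticed property name are guarded by m_noticeMutex.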
void Handle(const class pxr::UsdNotice::ObjectsChanged& objectsChanged)
{
// If already dirty, don't worry about it
if (m_isDirty)
return;
        if (m_stage != objectsChanged.GetStage())
return;
switch (m_watchType)
{
case WatchType::ePrim:
{
pxr::UsdPrim prim = m_stage->GetPrimAtPath(m_watchedPath);
if (prim)
{
for (const auto& path : objectsChanged.GetChangedInfoOnlyPaths())
{
const pxr::SdfPath& changePath = path.GetPrimPath();
if (m_stage->GetPrimAtPath(changePath) == prim)
{
const std::lock_guard<std::mutex> lock(m_noticeMutex);
m_noticedName = path.GetNameToken();
m_isDirty = true;
break;
}
}
}
break;
}
case WatchType::ePrimProperty:
{
pxr::UsdProperty prop = m_stage->GetPropertyAtPath(m_watchedPath);
if (prop)
{
if (objectsChanged.AffectedObject(prop))
{
const std::lock_guard<std::mutex> lock(m_noticeMutex);
m_noticedName = m_watchedPath.GetNameToken();
m_isDirty = true;
}
}
break;
}
case WatchType::eInvalid:
break;
}
}
pxr::UsdStageRefPtr m_stage;
pxr::TfNotice::Key m_registerKey;
pxr::SdfPath m_watchedPath;
WatchType m_watchType{ WatchType::eInvalid };
bool m_isDirty{ false };
pxr::TfToken m_noticedName;
std::mutex m_noticeMutex;
private:
UsdNoticeListener(long stageId)
{
m_stage = pxr::UsdUtilsStageCache::Get().Find(pxr::UsdStageCache::Id::FromLongInt(stageId));
if (!m_stage)
{
CARB_LOG_ERROR("Could not find USD stage");
return;
}
}
};
// ============================================================================
class OgnOnObjectChange
{
public:
UsdNoticeListenerRefPtr m_listener{ nullptr };
static bool compute(OgnOnObjectChangeDatabase& db)
{
if (checkNodeDisabledForOnlyPlay(db))
return true;
pxr::SdfPath watchedPath = toSdfPath(db.inputs.prim.firstOrDefault());
if (!watchedPath.IsEmpty())
{
auto name = db.inputs.name();
if (name != omni::fabric::kUninitializedToken)
{
auto nameToken = toTfToken(name);
watchedPath = watchedPath.AppendProperty(nameToken);
}
}
if (watchedPath.IsEmpty())
{
auto const& path = db.inputs.path();
if (!pxr::SdfPath::IsValidPathString(path))
{
if (!path.empty())
{
db.logError("Invalid path %s", path.data());
return false;
}
}
else
watchedPath = pxr::SdfPath(path);
}
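        // At this point watchedPath is a prim path, a prim-property path, the raw string
        // path, or empty; registerForPath below revokes any existing subscription when
        // handed an empty path.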
auto& state = db.internalState<OgnOnObjectChange>();
auto processChange = [&]() -> pxr::TfToken
{
// Find the listener for this node, or create one if it doesn't exist
if (!state.m_listener)
{
long stageId = db.abi_context().iContext->getStageId(db.abi_context());
state.m_listener = UsdNoticeListener::New(stageId);
state.m_listener->registerForPath(watchedPath, state.m_listener);
return pxr::TfToken();
}
if (state.m_listener->m_watchedPath != watchedPath)
{
// re-register for the new path
state.m_listener->registerForPath(watchedPath, state.m_listener);
return pxr::TfToken();
}
{
const std::lock_guard<std::mutex> lock(state.m_listener->m_noticeMutex);
if (std::exchange(state.m_listener->m_isDirty, false))
{
return state.m_listener->m_noticedName;
}
}
return pxr::TfToken();
};
auto changed = processChange();
if (!changed.IsEmpty())
{
auto iActionGraph = getInterface();
iActionGraph->setExecutionEnabled(outputs::changed.token(), db.getInstanceIndex());
db.outputs.propertyName() = omni::fabric::asInt(changed);
}
return true;
}
// ----------------------------------------------------------------------------
static void release(const NodeObj& nodeObj)
{
auto& state = OgnOnObjectChangeDatabase::sInternalState<OgnOnObjectChange>(nodeObj);
state.m_listener.Reset();
}
// ----------------------------------------------------------------------------
static bool updateNodeVersion(const GraphContextObj& context, const NodeObj& nodeObj, int oldVersion, int newVersion)
{
if (oldVersion < newVersion)
{
if (oldVersion < 2)
{
// We added inputs:onlyPlayback default true - to maintain previous behavior we should set this to false
const bool val{ false };
nodeObj.iNode->createAttribute(nodeObj, "inputs:onlyPlayback", Type(BaseDataType::eBool), &val, nullptr,
kAttributePortType_Input, kExtendedAttributeType_Regular, nullptr);
}
return true;
}
return false;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
| 9,254 | C++ | 30.587031 | 121 | 0.557921 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnTick.cpp | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnOnTickDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnOnTick
{
public:
static size_t computeVectorized(OgnOnTickDatabase& db, size_t count)
{
auto iActionGraph = getInterface();
const auto& contextObj = db.abi_context();
const IGraphContext* const iContext = contextObj.iContext;
bool isPlaying = iContext->getIsPlaying(contextObj);
auto elapsed = iContext->getElapsedTime(contextObj);
auto const t = iContext->getTime(contextObj);
auto const f = iContext->getFrame(contextObj);
auto const ts = iContext->getTimeSinceStart(contextObj);
auto const ast = iContext->getAbsoluteSimTime(contextObj);
auto onlyPlayback = db.inputs.onlyPlayback.vectorized(count);
auto framePeriod = db.inputs.framePeriod.vectorized(count);
auto isPlayingOut = db.outputs.isPlaying.vectorized(count);
auto time = db.outputs.time.vectorized(count);
auto frame = db.outputs.frame.vectorized(count);
auto timeSinceStart = db.outputs.timeSinceStart.vectorized(count);
auto absoluteSimTime = db.outputs.absoluteSimTime.vectorized(count);
auto deltaSeconds = db.outputs.deltaSeconds.vectorized(count);
auto accumulatedSeconds = db.state.accumulatedSeconds.vectorized(count);
auto frameCount = db.state.frameCount.vectorized(count);
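        // Each .vectorized(count) span above covers all `count` instances, so one compute
        // call services the whole batch. A framePeriod of N fires the tick on every (N+1)th
        // evaluation, with deltaSeconds reporting the time accumulated across skipped frames.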
for (size_t idx = 0; idx < count; ++idx)
{
if (onlyPlayback[idx] && !isPlaying)
{
accumulatedSeconds[idx] = 0;
frameCount[idx] = 0;
continue;
}
++frameCount[idx];
accumulatedSeconds[idx] += elapsed;
if (frameCount[idx] > framePeriod[idx])
{
frameCount[idx] = 0;
deltaSeconds[idx] = accumulatedSeconds[idx];
accumulatedSeconds[idx] = 0;
isPlayingOut[idx] = isPlaying;
time[idx] = t;
frame[idx] = f;
timeSinceStart[idx] = ts;
absoluteSimTime[idx] = ast;
iActionGraph->setExecutionEnabled(outputs::tick.token(), db.getInstanceIndex() + idx);
}
}
return count;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
| 2,839 | C++ | 32.411764 | 102 | 0.637548 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnFlipFlop.cpp | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnFlipFlopDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnFlipFlop
{
public:
// The flip-flop cycles output activation between 2 outputs
static constexpr size_t kNumLevels = 2;
// The output that will be activated on the next compute. 0 == A, 1 == B
int m_nextLevel{ 0 };
static bool compute(OgnFlipFlopDatabase& db)
{
auto iActionGraph = getInterface();
OgnFlipFlop& state = db.internalState<OgnFlipFlop>();
if (state.m_nextLevel == 0)
{
iActionGraph->setExecutionEnabled(outputs::a.token(), db.getInstanceIndex());
db.outputs.isA() = true;
}
else
{
iActionGraph->setExecutionEnabled(outputs::b.token(), db.getInstanceIndex());
db.outputs.isA() = false;
}
state.m_nextLevel = (state.m_nextLevel + 1) % kNumLevels;
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
| 1,472 | C++ | 26.277777 | 89 | 0.669158 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/ActionNodeCommon.h | // Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

namespace omni
{
namespace graph
{
namespace action
{
/**
 * Checks whether a node that has an `inputs:onlyPlayback` attribute should be disabled
 * because playback is not currently happening.
*
* @param[in] db The node OGN Database object
* @return true if the node should be disabled
*/
template<typename NodeDb>
bool checkNodeDisabledForOnlyPlay(NodeDb const& db)
{
    return db.inputs.onlyPlayback() && !db.abi_context().iContext->getIsPlaying(db.abi_context());
}
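// A minimal usage sketch (OgnMyNodeDatabase is an illustrative generated database type whose
// .ogn file declares an `inputs:onlyPlayback` attribute):
//
//     static bool compute(OgnMyNodeDatabase& db)
//     {
//         if (checkNodeDisabledForOnlyPlay(db))
//             return true; // stay idle while the timeline is not playing
//         // ... normal compute work ...
//         return true;
//     }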
} // action
} // graph
} // omni
| 897 | C | 27.062499 | 103 | 0.760312 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnSyncGate.cpp | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include <OgnSyncGateDatabase.h>
#include <omni/graph/action/IActionGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
class OgnSyncGate
{
public:
// The current number of accumulated executions.
uint32_t m_numAccumulatedExecIn{ 0 };
// The current synchronization value.
uint64_t m_syncValue{ 0 };
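    // The gate enables execOut only after receiving as many execIn firings as there are
    // upstream connections, all sharing the same syncValue; a new syncValue restarts the
    // accumulation.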
static bool compute(OgnSyncGateDatabase& db)
{
auto nodeObj = db.abi_node();
const AttributeObj attr = nodeObj.iNode->getAttributeByToken(nodeObj, inputs::execIn.m_token);
const auto accumulationThreshold = static_cast<uint32_t>(attr.iAttribute->getUpstreamConnectionCount(attr));
OgnSyncGate& state = db.internalState<OgnSyncGate>();
const bool reset = db.inputs.syncValue() != state.m_syncValue;
state.m_numAccumulatedExecIn = reset ? 1 : std::min(state.m_numAccumulatedExecIn + 1, accumulationThreshold);
db.outputs.syncValue() = state.m_syncValue = db.inputs.syncValue();
if (state.m_numAccumulatedExecIn >= accumulationThreshold)
{
auto iActionGraph = getInterface();
iActionGraph->setExecutionEnabled(outputs::execOut.token(), db.getInstanceIndex());
}
return true;
}
};
REGISTER_OGN_NODE()
} // action
} // graph
} // omni
| 1,727 | C++ | 32.882352 | 117 | 0.710481 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/nodes/OgnOnGamepadInput.cpp | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include "ActionNodeCommon.h"
#include <carb/input/IInput.h>
#include <carb/input/InputTypes.h>
#include <omni/graph/action/IActionGraph.h>
#include <omni/kit/IAppWindow.h>
#include <OgnOnGamepadInputDatabase.h>
#include <atomic>
using namespace carb::input;
namespace omni
{
namespace graph
{
namespace action
{
// This differs from carb::input::GamepadInput::eCount by 10 because we don't allow joystick and trigger inputs.
constexpr auto s_firstEventWeCareAbout = carb::input::GamepadInput::eA;
constexpr size_t s_numNames = size_t(carb::input::GamepadInput::eCount) - size_t(s_firstEventWeCareAbout);
static std::array<NameToken, s_numNames> s_elementTokens;
class OgnOnGamepadInput
{
public:
// Assume a zero subsId means not registered
SubscriptionId m_gamepadConnectionSubsId{ 0 };
SubscriptionId m_elementEventSubsId{ 0 };
GamepadEvent m_elementEvent;
std::atomic<bool> m_requestedRegistrationInCompute{ false }; // Dirty flag to ask compute() to potentially subscribe
// to the new gamepad if needed
    exec::unstable::Stamp m_elementSetStamp; // The stamp set by the authoring node when the event occurs
exec::unstable::SyncStamp m_elementSetSyncStamp; // The stamp used by each instance
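    // compute() consumes a buffered event by syncing m_elementSetSyncStamp against
    // m_elementSetStamp, so each gamepad event is handled at most once per instance.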
static bool trySwitchGamepadSubscription(const size_t newGamepadId, const NodeObj& nodeObj)
{
IInput* input = carb::getCachedInterface<IInput>();
if (!input)
return false;
auto& state = OgnOnGamepadInputDatabase::sInternalState<OgnOnGamepadInput>(nodeObj, kAuthoringGraphIndex);
omni::kit::IAppWindow* appWindow = omni::kit::getDefaultAppWindow();
if (!appWindow)
{
return false;
}
Gamepad* gamepad = appWindow->getGamepad(newGamepadId);
if (!gamepad)
{
CARB_LOG_WARN_ONCE("The new gamepad ID is not associated with any gamepad");
if (state.m_elementEventSubsId > 0)
{
input->unsubscribeToInputEvents(state.m_elementEventSubsId);
}
state.m_elementEventSubsId = 0;
return false;
}
if (state.m_elementEventSubsId > 0)
{
input->unsubscribeToInputEvents(state.m_elementEventSubsId);
}
state.m_elementEventSubsId =
input->subscribeToInputEvents((carb::input::InputDevice*)gamepad, kEventTypeAll, onGamepadEvent,
reinterpret_cast<void*>(nodeObj.nodeHandle), kSubscriptionOrderDefault);
return true;
}
static bool onGamepadEvent(const InputEvent& e, void* userData)
{
if (e.deviceType != DeviceType::eGamepad)
{
return false;
}
NodeHandle nodeHandle = reinterpret_cast<NodeHandle>(userData);
auto iNode = carb::getCachedInterface<omni::graph::core::INode>();
NodeObj nodeObj = iNode->getNodeFromHandle(nodeHandle);
if (!nodeObj.isValid())
return false;
// Copy the event data and request the next compute()
auto& authoringState =
OgnOnGamepadInputDatabase::sInternalState<OgnOnGamepadInput>(nodeObj, kAuthoringGraphIndex);
authoringState.m_elementSetStamp.next();
authoringState.m_elementEvent = e.gamepadEvent;
iNode->requestCompute(nodeObj);
return true;
}
static void onGamepadIdChanged(const AttributeObj& attrObj, const void* value)
{
NodeObj nodeObj{ attrObj.iAttribute->getNode(attrObj) };
if (nodeObj.nodeHandle == kInvalidNodeHandle)
return;
GraphObj graphObj{ nodeObj.iNode->getGraph(nodeObj) };
if (graphObj.graphHandle == kInvalidGraphHandle)
return;
GraphContextObj context{ graphObj.iGraph->getDefaultGraphContext(graphObj) };
if (context.contextHandle == kInvalidGraphContextHandle)
return;
ConstAttributeDataHandle attributeDataHandle =
attrObj.iAttribute->getConstAttributeDataHandle(attrObj, kAccordingToContextIndex);
const uint32_t gamepadId = *getDataR<uint32_t>(context, attributeDataHandle);
// Change subscription target
trySwitchGamepadSubscription(gamepadId, nodeObj);
}
static void initialize(const GraphContextObj& context, const NodeObj& nodeObj)
{
        // Populate the gamepad element token table the first time any node of this type initializes
[[maybe_unused]] static bool callOnce = ([] {
s_elementTokens = {
OgnOnGamepadInputDatabase::tokens.FaceButtonBottom,
OgnOnGamepadInputDatabase::tokens.FaceButtonRight,
OgnOnGamepadInputDatabase::tokens.FaceButtonLeft,
OgnOnGamepadInputDatabase::tokens.FaceButtonTop,
OgnOnGamepadInputDatabase::tokens.LeftShoulder,
OgnOnGamepadInputDatabase::tokens.RightShoulder,
OgnOnGamepadInputDatabase::tokens.SpecialLeft,
OgnOnGamepadInputDatabase::tokens.SpecialRight,
OgnOnGamepadInputDatabase::tokens.LeftStickButton,
OgnOnGamepadInputDatabase::tokens.RightStickButton,
OgnOnGamepadInputDatabase::tokens.DpadUp,
OgnOnGamepadInputDatabase::tokens.DpadRight,
OgnOnGamepadInputDatabase::tokens.DpadDown,
OgnOnGamepadInputDatabase::tokens.DpadLeft
};
} (), true);
// Get the default or stored gamepad ID when creating new nodes or loading a saved file
auto gamepadIdAttr = nodeObj.iNode->getAttributeByToken(nodeObj, inputs::gamepadId.token());
IInput* input = carb::getCachedInterface<IInput>();
if (!input)
return; // Happens normally in headless
auto& authoringState =
OgnOnGamepadInputDatabase::sInternalState<OgnOnGamepadInput>(nodeObj, kAuthoringGraphIndex);
authoringState.m_requestedRegistrationInCompute.store(true);
gamepadIdAttr.iAttribute->registerValueChangedCallback(gamepadIdAttr, onGamepadIdChanged, true);
// This will allow user to connect gamepad after specifying gamepad ID
authoringState.m_gamepadConnectionSubsId = input->subscribeToGamepadConnectionEvents(
[](const carb::input::GamepadConnectionEvent& evt, void* userData)
{
NodeHandle nodeHandle = reinterpret_cast<NodeHandle>(userData);
auto iNode = carb::getCachedInterface<omni::graph::core::INode>();
NodeObj nodeObj = iNode->getNodeFromHandle(nodeHandle);
auto& state = OgnOnGamepadInputDatabase::sInternalState<OgnOnGamepadInput>(nodeObj);
state.m_requestedRegistrationInCompute.store(true);
                // The subscription depends on another GamepadConnectionEvents subscription in
                // IAppWindowImplCommon, and we cannot assume the execution order, so request a compute
iNode->requestCompute(nodeObj);
},
reinterpret_cast<void*>(nodeObj.nodeHandle));
}
static void release(const NodeObj& nodeObj)
{
auto& authoringState = OgnOnGamepadInputDatabase::sInternalState<OgnOnGamepadInput>(nodeObj);
IInput* input = carb::getCachedInterface<IInput>();
if (!input)
return;
if (authoringState.m_elementEventSubsId > 0)
{
input->unsubscribeToInputEvents(authoringState.m_elementEventSubsId);
}
if (authoringState.m_gamepadConnectionSubsId > 0)
{
input->unsubscribeToGamepadConnectionEvents(authoringState.m_gamepadConnectionSubsId);
}
}
static bool compute(OgnOnGamepadInputDatabase& db)
{
auto& authoringState =
OgnOnGamepadInputDatabase::sInternalState<OgnOnGamepadInput>(db.abi_node(), kAuthoringGraphIndex);
auto& localState = db.internalState<OgnOnGamepadInput>();
// Retry subscribe gamepad when new gamepad is connected
if (authoringState.m_requestedRegistrationInCompute.exchange(false))
{
trySwitchGamepadSubscription(db.inputs.gamepadId(), db.abi_node());
}
if (checkNodeDisabledForOnlyPlay(db))
return true;
if (localState.m_elementSetSyncStamp.makeSync(authoringState.m_elementSetStamp))
{
NameToken const& gamepadElementIn = db.inputs.gamepadElementIn();
            // Offset the index since we excluded the joystick and trigger inputs (see s_firstEventWeCareAbout).
if (size_t(authoringState.m_elementEvent.input) < size_t(s_firstEventWeCareAbout))
return true;
size_t elementIndex = size_t(authoringState.m_elementEvent.input) - size_t(s_firstEventWeCareAbout);
if (elementIndex >= s_elementTokens.size())
{
db.logError("Invalid Key %d detected", authoringState.m_elementEvent.input);
return false;
}
auto iActionGraph = getInterface();
if (gamepadElementIn == s_elementTokens[elementIndex])
{
if (authoringState.m_elementEvent.value == 1)
{
iActionGraph->setExecutionEnabled(outputs::pressed.token(), omni::graph::core::kAccordingToContextIndex);
db.outputs.isPressed() = true;
}
else
{
iActionGraph->setExecutionEnabled(outputs::released.token(), omni::graph::core::kAccordingToContextIndex);
db.outputs.isPressed() = false;
}
}
}
return true;
}
};
REGISTER_OGN_NODE()
} // namespace action
} // namespace graph
} // namespace omni
| 10,224 | C++ | 38.941406 | 126 | 0.654636 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/ogn/tests/TestOgnCounter.py | import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
import os
class TestOgn(ogts.OmniGraphTestCase):
async def test_generated(self):
test_data = [{'inputs': [['inputs:execIn', 1, False]], 'outputs': [['outputs:count', 1, False]], 'state_set': [['state:count', 0, False]]}, {'inputs': [['inputs:execIn', 1, False]], 'outputs': [['outputs:count', 2, False]], 'state_set': [['state:count', 1, False]]}, {'inputs': [['inputs:execIn', 0, False], ['inputs:reset', 1, False]], 'outputs': [['outputs:count', 0, False]], 'state_set': [['state:count', 1, False]], 'state_get': [['state:count', 0, False]]}]
test_node = None
test_graph = None
for i, test_run in enumerate(test_data):
inputs = test_run.get('inputs', [])
outputs = test_run.get('outputs', [])
state_set = test_run.get('state_set', [])
state_get = test_run.get('state_get', [])
setup = test_run.get('setup', None)
if setup is None or setup:
await omni.usd.get_context().new_stage_async()
test_graph = None
elif not setup:
self.assertTrue(test_graph is not None and test_graph.is_valid(), "Test is misconfigured - empty setup cannot be in the first test")
if setup:
(test_graph, test_nodes, _, _) = og.Controller.edit("/TestGraph", setup)
self.assertTrue(test_nodes)
test_node = test_nodes[0]
elif setup is None:
if test_graph is None:
test_graph = og.Controller.create_graph("/TestGraph")
self.assertTrue(test_graph is not None and test_graph.is_valid())
test_node = og.Controller.create_node(
("TestNode_omni_graph_action_Counter", test_graph), "omni.graph.action.Counter"
)
self.assertTrue(test_graph is not None and test_graph.is_valid(), "Test graph invalid")
self.assertTrue(test_node is not None and test_node.is_valid(), "Test node invalid")
await og.Controller.evaluate(test_graph)
values_to_set = inputs + state_set
if values_to_set:
for attribute_name, attribute_value, _ in inputs + state_set:
og.Controller((attribute_name, test_node)).set(attribute_value)
await og.Controller.evaluate(test_graph)
for attribute_name, expected_value, _ in outputs + state_get:
attribute = og.Controller.attribute(attribute_name, test_node)
actual_output = og.Controller.get(attribute)
expected_type = None
if isinstance(expected_value, dict):
expected_type = expected_value["type"]
expected_value = expected_value["value"]
ogts.verify_values(expected_value, actual_output, f"omni.graph.action.Counter User test case #{i+1}: {attribute_name} attribute value error")
if expected_type:
tp = og.AttributeType.type_from_ogn_type_name(expected_type)
actual_type = attribute.get_resolved_type()
if tp != actual_type:
raise ValueError(f"omni.graph.action.Counter User tests - {attribute_name}: Expected {expected_type}, saw {actual_type.get_ogn_type_name()}")
async def test_data_access(self):
from omni.graph.action.ogn.OgnCounterDatabase import OgnCounterDatabase
test_file_name = "OgnCounterTemplate.usda"
usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
if not os.path.exists(usd_path):
self.assertTrue(False, f"{usd_path} not found for loading test")
(result, error) = await ogts.load_test_file(usd_path)
self.assertTrue(result, f'{error} on {usd_path}')
test_node = og.Controller.node("/TestGraph/Template_omni_graph_action_Counter")
database = OgnCounterDatabase(test_node)
self.assertTrue(test_node.is_valid())
node_type_name = test_node.get_type_name()
self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)
def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
test_type = "USD Load" if usd_test else "Database Access"
return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"
self.assertTrue(test_node.get_attribute_exists("inputs:execIn"))
attribute = test_node.get_attribute("inputs:execIn")
db_value = database.inputs.execIn
self.assertTrue(test_node.get_attribute_exists("inputs:reset"))
attribute = test_node.get_attribute("inputs:reset")
db_value = database.inputs.reset
self.assertTrue(test_node.get_attribute_exists("outputs:count"))
attribute = test_node.get_attribute("outputs:count")
db_value = database.outputs.count
self.assertTrue(test_node.get_attribute_exists("outputs:execOut"))
attribute = test_node.get_attribute("outputs:execOut")
db_value = database.outputs.execOut
self.assertTrue(test_node.get_attribute_exists("state:count"))
attribute = test_node.get_attribute("state:count")
db_value = database.state.count
| 5,356 | Python | 55.389473 | 471 | 0.60717 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/_impl/templates/template_omni.graph.action.SwitchToken.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from pathlib import Path
from typing import List
import omni.graph.core as og
import omni.ui as ui
from omni.kit.property.usd.custom_layout_helper import CustomLayoutFrame, CustomLayoutGroup, CustomLayoutProperty
from omni.kit.window.property.templates import HORIZONTAL_SPACING
class CustomLayout:
def __init__(self, compute_node_widget):
self.enable = True
self.compute_node_widget = compute_node_widget
self.controller = og.Controller()
self.add_button: ui.Button = None
self.remove_button: ui.Button = None
self.node = self.controller.node(self.compute_node_widget._payload[-1])
def _get_input_attrib_names(self) -> List[str]:
"""Return the list of dynamic input attribs"""
all_attribs = self.node.get_attributes()
input_attrib_names = []
for attrib in all_attribs:
            attrib_name = attrib.get_name()
            if attrib_name.startswith("inputs:branch"):
                input_attrib_names.append(attrib_name)
return input_attrib_names
def _get_max_suffix(self) -> int:
"""Return the maximum suffix of dynamic inputs or -1 if there are none"""
names = self._get_input_attrib_names()
if not names:
return -1
        # 13 == len("inputs:branch"); the remainder of the name is the numeric suffix
        return max(int(name[13:]) for name in names)
def _on_click_add(self):
next_suffix = f"{self._get_max_suffix() + 1:02}"
new_attr = self.controller.create_attribute(
self.node,
f"inputs:branch{next_suffix}",
og.Type(og.BaseDataType.TOKEN, 1, 0, og.AttributeRole.NONE),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT,
)
new_attr.set_metadata(og.MetadataKeys.LITERAL_ONLY, "1")
self.controller.create_attribute(
self.node,
f"outputs:output{next_suffix}",
og.Type(og.BaseDataType.UINT, 1, 0, og.AttributeRole.EXECUTION),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT,
)
self.compute_node_widget.rebuild_window()
self.remove_button.enabled = True
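    # After two clicks of the add button, the node ends up with paired dynamic attributes
    # such as "inputs:branch00"/"outputs:output00" and "inputs:branch01"/"outputs:output01"
    # (suffixes are zero-padded by the f"{...:02}" format above).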
def _on_click_remove(self):
max_suffix = self._get_max_suffix()
if max_suffix < 0:
return
attrib_to_remove = self.node.get_attribute(f"outputs:output{max_suffix:02}")
self.controller.remove_attribute(attrib_to_remove)
attrib_to_remove = self.node.get_attribute(f"inputs:branch{max_suffix:02}")
self.controller.remove_attribute(attrib_to_remove)
self.compute_node_widget.rebuild_window()
self.remove_button.enabled = max_suffix > 0
def _controls_build_fn(self, *args):
max_suffix = self._get_max_suffix()
icons_path = Path(__file__).absolute().parent.parent.parent.parent.parent.parent.joinpath("icons")
with ui.HStack(height=0, spacing=HORIZONTAL_SPACING):
ui.Spacer()
self.add_button = ui.Button(
image_url=f"{icons_path.joinpath('add.svg')}",
width=22,
height=22,
style={"Button": {"background_color": 0x1F2124}},
clicked_fn=self._on_click_add,
tooltip_fn=lambda: ui.Label("Add New Branch"),
)
self.remove_button = ui.Button(
image_url=f"{icons_path.joinpath('remove.svg')}",
width=22,
height=22,
style={"Button": {"background_color": 0x1F2124}},
enabled=(max_suffix > 0),
clicked_fn=self._on_click_remove,
tooltip_fn=lambda: ui.Label("Remove Branch"),
)
def apply(self, props):
# Called by compute_node_widget to apply UI when selection changes
def find_prop(name):
return next((p for p in props if p.prop_name == name), None)
frame = CustomLayoutFrame(hide_extra=True)
names = self._get_input_attrib_names()
with frame:
with CustomLayoutGroup("Inputs"):
for name in names:
                    prop = find_prop(name)
                    if prop is not None:
                        CustomLayoutProperty(prop.prop_name)
CustomLayoutProperty(None, None, build_fn=self._controls_build_fn)
return frame.apply(props)
| 4,701 | Python | 40.610619 | 113 | 0.61689 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_nodes_02.py | """Action Graph Node Tests, Part 2"""
import random
from functools import partial
from typing import List
import carb
import carb.input
import numpy as np
import omni.client
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.graph.tools.ogn as ogn
import omni.kit.app
import omni.kit.test
from carb.input import GamepadInput, KeyboardEventType, KeyboardInput
from omni.graph.core import ThreadsafetyTestUtils
# ======================================================================
class TestActionGraphNodes(ogts.OmniGraphTestCase):
"""Tests action graph node functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
keys = og.Controller.Keys
E = og.ExecutionAttributeState.ENABLED
D = og.ExecutionAttributeState.DISABLED
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_ongamepadinput_node(self, test_instance_id: int = 0):
"""Test OnGamepadInput node"""
# Obtain an interface to a few gamepads and carb input provider.
input_provider = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, carb.input.acquire_input_provider()
)
gamepad_list = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id,
[
input_provider.create_gamepad("Gamepad 0", "0"),
input_provider.create_gamepad("Gamepad 1", "1"),
input_provider.create_gamepad("Gamepad 2", "2"),
],
)
# Connect the gamepads.
for gamepad in gamepad_list:
self.assertIsNotNone(gamepad)
ThreadsafetyTestUtils.single_evaluation_first_test_instance(
test_instance_id, partial(input_provider.set_gamepad_connected, gamepad, True)
)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (on_gamepad_input_node,), _, _) = og.Controller.edit(
graph_path, {self.keys.CREATE_NODES: ("OnGamepadInput", "omni.graph.action.OnGamepadInput")}
)
# Obtain necessary attributes.
in_onlyplayback_attr = on_gamepad_input_node.get_attribute("inputs:onlyPlayback")
in_gamepadid_attr = on_gamepad_input_node.get_attribute("inputs:gamepadId")
in_gamepad_element_attr = on_gamepad_input_node.get_attribute("inputs:gamepadElementIn")
out_pressed_attr = on_gamepad_input_node.get_attribute("outputs:pressed")
out_released_attr = on_gamepad_input_node.get_attribute("outputs:released")
out_ispressed_attr = on_gamepad_input_node.get_attribute("outputs:isPressed")
# Define a list of all possible gamepad inputs.
# FIXME: Note that the commented-out gamepad inputs produce errors in the OnGamepadInput's
# compute method because those specific inputs are not being considered. Maybe change to
# include these inputs and/or not spit out scary-looking error messages to users?
possible_gamepad_inputs = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id,
[
GamepadInput.A,
GamepadInput.B,
# GamepadInput.COUNT,
GamepadInput.DPAD_DOWN,
GamepadInput.DPAD_LEFT,
GamepadInput.DPAD_RIGHT,
GamepadInput.DPAD_UP,
GamepadInput.LEFT_SHOULDER,
GamepadInput.LEFT_STICK,
# GamepadInput.LEFT_STICK_DOWN,
# GamepadInput.LEFT_STICK_LEFT,
# GamepadInput.LEFT_STICK_RIGHT,
# GamepadInput.LEFT_STICK_UP,
# GamepadInput.LEFT_TRIGGER,
GamepadInput.MENU1,
GamepadInput.MENU2,
GamepadInput.RIGHT_SHOULDER,
GamepadInput.RIGHT_STICK,
# GamepadInput.RIGHT_STICK_DOWN,
# GamepadInput.RIGHT_STICK_LEFT,
# GamepadInput.RIGHT_STICK_RIGHT,
# GamepadInput.RIGHT_STICK_UP,
# GamepadInput.RIGHT_TRIGGER,
GamepadInput.X,
GamepadInput.Y,
],
)
# Define a dict of all valid inputs:gamepadElementIn tokens, along with
# their corresponding gamepad input. NOTE: The node's allowed token names
# are a bit different from the carb.input.GamepadInput values, might make
# sense to have them be the same?
allowed_gamepad_element_tokens = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id,
{
"Face Button Bottom": GamepadInput.A,
"Face Button Right": GamepadInput.B,
"Face Button Left": GamepadInput.X,
"Face Button Top": GamepadInput.Y,
"Left Shoulder": GamepadInput.LEFT_SHOULDER,
"Right Shoulder": GamepadInput.RIGHT_SHOULDER,
"Special Left": GamepadInput.MENU1,
"Special Right": GamepadInput.MENU2,
"Left Stick Button": GamepadInput.LEFT_STICK,
"Right Stick Button": GamepadInput.RIGHT_STICK,
"D-Pad Up": GamepadInput.DPAD_UP,
"D-Pad Right": GamepadInput.DPAD_RIGHT,
"D-Pad Down": GamepadInput.DPAD_DOWN,
"D-Pad Left": GamepadInput.DPAD_LEFT,
},
)
# Wrap main driver code in the following generator (in order to leverage
# the ThreadsafetyTestUtils.make_threading_test decorator). Note that for simplicity's sake the
# fake gamepad IDs correspond directly with their index in the gamepad_list.
def _test_ongamepadinput_node(quick_run: bool = True, num_inputs_to_test: int = 5):
_possible_gamepad_inputs = []
_allowed_gamepad_element_tokens = {}
# Codepath for accelerated test (won't take as long and still provide some code coverage).
if quick_run:
# Make sure that the input flags make sense.
if num_inputs_to_test < 1:
num_inputs_to_test = 1
elif num_inputs_to_test > 14:
num_inputs_to_test = 14
# Define two sets of random indices that'll determine the combination of inputs:gamepadElementIn
# tokens and emulated key inputs that we'll test for the current function call. Note that the
# inputs:gamepadElementIn tokens we choose and the emulated keys need not coincide, resulting
# in no output being generated by the OnGamepadInput node. Also note that because we want to
# use the same set of random indices in each test instance (so that all test graph instances
# run their tests/comparisons using the same combination of buttons/inputs), we add said indices
# (or more specifically an internal method that's used to create those indices, so that they're
# only created once for all test instances) to the overall threading cache.
def create_random_indices():
rand_indices_0 = set()
while len(rand_indices_0) < num_inputs_to_test:
rand_indices_0.add(random.randrange(len(possible_gamepad_inputs)))
rand_indices_1 = set()
while len(rand_indices_1) < num_inputs_to_test:
rand_indices_1.add(random.randrange(len(allowed_gamepad_element_tokens)))
return rand_indices_0, rand_indices_1
rand_indices_0, rand_indices_1 = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, create_random_indices()
)
# Convert the sets into lists so that their elements can be accessible by index.
rand_indices_0 = list(rand_indices_0)
rand_indices_1 = list(rand_indices_1)
# Create an abbreviated list of possible gamepad input elements.
for r_i in rand_indices_0:
_possible_gamepad_inputs.append(possible_gamepad_inputs[r_i])
# Create an abbreviated dict of allowed token-key input pairs.
temp_keys_list = list(allowed_gamepad_element_tokens)
temp_values_list = list(allowed_gamepad_element_tokens.values())
for r_i in rand_indices_1:
_allowed_gamepad_element_tokens[temp_keys_list[r_i]] = temp_values_list[r_i]
# Codepath for full test.
else:
_possible_gamepad_inputs = possible_gamepad_inputs
_allowed_gamepad_element_tokens = allowed_gamepad_element_tokens
            # Set the gamepad id on the OnGamepadInput node. Note that multiple graph instances
            # are set to track the same gamepad to look for potential concurrency issues.
            in_gamepadid_attr.set(test_instance_id % len(gamepad_list))  # noqa S001
# Loop through each allowed gamepad element token, and set it on the OnGamepadInput nodes'
# corresponding input attributes.
for allowed_token, allowed_input in _allowed_gamepad_element_tokens.items():
in_gamepad_element_attr.set(allowed_token)
# Loop through each possible gamepad input, which we will emulate.
for emulated_input in _possible_gamepad_inputs:
# Loop through each possible input event type (0 == key is released, 1 == key is pressed)
for event_type in [1, 0]:
# Trigger each gamepad input.
input_provider.buffer_gamepad_event(
gamepad_list[test_instance_id % len(gamepad_list)], emulated_input, event_type # noqa S001
)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
                        # If the current emulated input matches the inputs:gamepadElementIn attribute setting,
                        # check that the nodes reacted appropriately. Otherwise check that the nodes did not
                        # register the input.
if emulated_input == allowed_input:
if event_type == 1:
self.assertEqual(out_pressed_attr.get(), self.E)
self.assertEqual(out_released_attr.get(), self.D)
self.assertTrue(out_ispressed_attr.get())
elif event_type == 0:
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.E)
self.assertFalse(out_ispressed_attr.get())
else:
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.D)
self.assertFalse(out_ispressed_attr.get())
# Test that the OnGamepadInput nodes works correctly when the onlyPlayback input is disabled.
in_onlyplayback_attr.set(False)
for _ in _test_ongamepadinput_node():
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
# Test that the OnGamepadInput nodes works correctly when the onlyPlayback input is enabled.
in_onlyplayback_attr.set(True)
timeline = omni.timeline.get_timeline_interface()
timeline.set_target_framerate(timeline.get_time_codes_per_seconds())
timeline.play()
for _ in _test_ongamepadinput_node():
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
timeline.stop()
# Delete the gamepad node before destroying the gamepads themselves so that the nodes don't throw
# any warnings about having invalid gamepadIds.
og.Controller.delete_node(on_gamepad_input_node)
# Disconnect and destroy the gamepads.
for gamepad in gamepad_list:
ThreadsafetyTestUtils.single_evaluation_last_test_instance(
test_instance_id, partial(input_provider.set_gamepad_connected, gamepad, False)
)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
ThreadsafetyTestUtils.single_evaluation_last_test_instance(
test_instance_id, partial(input_provider.destroy_gamepad, gamepad)
)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
# ----------------------------------------------------------------------
# The OnImpulseEvent node ALSO has a built-in test construct in its .ogn file located
# at ../../nodes/OgnOnImpulseEvent.ogn (relative to the source location of the currently-
# opened testing script).
@ThreadsafetyTestUtils.make_threading_test
def test_onimpulseevent_node(self, test_instance_id: int = 0):
"""Test OnImpulseEvent node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (_, counter_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [("OnImpulse.inputs:onlyPlayback", False)],
self.keys.CONNECT: (
"OnImpulse.outputs:execOut",
"Counter.inputs:execIn",
),
},
)
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
# After several updates, there should have been no compute calls.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(counter_controller.get(), 0)
# Change the OnImpulse node's state attribute. The node should now request compute.
og.Controller.edit(graph_path, {self.keys.SET_VALUES: (graph_path + "/OnImpulse.state:enableImpulse", True)})
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(counter_controller.get(), 1)
# More updates should not result in more computes.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(counter_controller.get(), 1)
# ----------------------------------------------------------------------
async def test_onkeyboardinput_node(self):
"""Test OnKeyboardInput node"""
app = omni.kit.app.get_app()
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
(_, (on_keyboard_input_node,), _, _) = og.Controller.edit(
self.TEST_GRAPH_PATH, {self.keys.CREATE_NODES: ("OnKeyboardInput", "omni.graph.action.OnKeyboardInput")}
)
# Obtain necessary attributes.
in_keyin_attr = on_keyboard_input_node.get_attribute("inputs:keyIn")
in_playbackonly_attr = on_keyboard_input_node.get_attribute("inputs:onlyPlayback")
out_keyout_attr = on_keyboard_input_node.get_attribute("outputs:keyOut")
out_pressed_attr = on_keyboard_input_node.get_attribute("outputs:pressed")
out_released_attr = on_keyboard_input_node.get_attribute("outputs:released")
out_ispressed_attr = on_keyboard_input_node.get_attribute("outputs:isPressed")
# Obtain an interface to the keyboard and carb input provider.
keyboard = omni.appwindow.get_default_app_window().get_keyboard()
input_provider = carb.input.acquire_input_provider()
self.assertIsNotNone(keyboard)
# Define a list of all possible carb.input.KeyboardInput inputs. Note that not all possible
# keys are necessarily detectable by the OnKeyboardInput node (this list also includes the
# UNKNOWN key).
possible_key_inputs = [
KeyboardInput.A,
KeyboardInput.APOSTROPHE,
KeyboardInput.B,
KeyboardInput.BACKSLASH,
KeyboardInput.BACKSPACE,
KeyboardInput.C,
KeyboardInput.CAPS_LOCK,
KeyboardInput.COMMA,
KeyboardInput.D,
KeyboardInput.DEL,
KeyboardInput.DOWN,
KeyboardInput.E,
KeyboardInput.END,
KeyboardInput.ENTER,
KeyboardInput.EQUAL,
KeyboardInput.ESCAPE,
KeyboardInput.F,
KeyboardInput.F1,
KeyboardInput.F10,
KeyboardInput.F11,
KeyboardInput.F12,
KeyboardInput.F2,
KeyboardInput.F3,
KeyboardInput.F4,
KeyboardInput.F5,
KeyboardInput.F6,
KeyboardInput.F7,
KeyboardInput.F8,
KeyboardInput.F9,
KeyboardInput.G,
KeyboardInput.GRAVE_ACCENT,
KeyboardInput.H,
KeyboardInput.HOME,
KeyboardInput.I,
KeyboardInput.INSERT,
KeyboardInput.J,
KeyboardInput.K,
KeyboardInput.KEY_0,
KeyboardInput.KEY_1,
KeyboardInput.KEY_2,
KeyboardInput.KEY_3,
KeyboardInput.KEY_4,
KeyboardInput.KEY_5,
KeyboardInput.KEY_6,
KeyboardInput.KEY_7,
KeyboardInput.KEY_8,
KeyboardInput.KEY_9,
KeyboardInput.L,
KeyboardInput.LEFT,
KeyboardInput.LEFT_ALT,
KeyboardInput.LEFT_BRACKET,
KeyboardInput.LEFT_CONTROL,
KeyboardInput.LEFT_SHIFT,
KeyboardInput.LEFT_SUPER,
KeyboardInput.M,
KeyboardInput.MENU,
KeyboardInput.MINUS,
KeyboardInput.N,
KeyboardInput.NUMPAD_0,
KeyboardInput.NUMPAD_1,
KeyboardInput.NUMPAD_2,
KeyboardInput.NUMPAD_3,
KeyboardInput.NUMPAD_4,
KeyboardInput.NUMPAD_5,
KeyboardInput.NUMPAD_6,
KeyboardInput.NUMPAD_7,
KeyboardInput.NUMPAD_8,
KeyboardInput.NUMPAD_9,
KeyboardInput.NUMPAD_ADD,
KeyboardInput.NUMPAD_DEL,
KeyboardInput.NUMPAD_DIVIDE,
KeyboardInput.NUMPAD_ENTER,
KeyboardInput.NUMPAD_EQUAL,
KeyboardInput.NUMPAD_MULTIPLY,
KeyboardInput.NUMPAD_SUBTRACT,
KeyboardInput.NUM_LOCK,
KeyboardInput.O,
KeyboardInput.P,
KeyboardInput.PAGE_DOWN,
KeyboardInput.PAGE_UP,
KeyboardInput.PAUSE,
KeyboardInput.PERIOD,
KeyboardInput.PRINT_SCREEN,
KeyboardInput.Q,
KeyboardInput.R,
KeyboardInput.RIGHT,
KeyboardInput.RIGHT_ALT,
KeyboardInput.RIGHT_BRACKET,
KeyboardInput.RIGHT_CONTROL,
KeyboardInput.RIGHT_SHIFT,
KeyboardInput.RIGHT_SUPER,
KeyboardInput.S,
KeyboardInput.SCROLL_LOCK,
KeyboardInput.SEMICOLON,
KeyboardInput.SLASH,
KeyboardInput.SPACE,
KeyboardInput.T,
KeyboardInput.TAB,
KeyboardInput.U,
KeyboardInput.UNKNOWN,
KeyboardInput.UP,
KeyboardInput.V,
KeyboardInput.W,
KeyboardInput.X,
KeyboardInput.Y,
KeyboardInput.Z,
]
# Define a dictionary of token keys representing the possible inputs to the OnKeyboardInput node's
# "keyIn" attribute, and values representing the corresponding carb.input.KeyboardInput. Note that
# not all possible keys are necessarily allowed for detection by the OnKeyboardInput node (e.g.
# "Unknown")
allowed_token_key_inputs = {
"A": KeyboardInput.A,
"B": KeyboardInput.B,
"C": KeyboardInput.C,
"D": KeyboardInput.D,
"E": KeyboardInput.E,
"F": KeyboardInput.F,
"G": KeyboardInput.G,
"H": KeyboardInput.H,
"I": KeyboardInput.I,
"J": KeyboardInput.J,
"K": KeyboardInput.K,
"L": KeyboardInput.L,
"M": KeyboardInput.M,
"N": KeyboardInput.N,
"O": KeyboardInput.O,
"P": KeyboardInput.P,
"Q": KeyboardInput.Q,
"R": KeyboardInput.R,
"S": KeyboardInput.S,
"T": KeyboardInput.T,
"U": KeyboardInput.U,
"V": KeyboardInput.V,
"W": KeyboardInput.W,
"X": KeyboardInput.X,
"Y": KeyboardInput.Y,
"Z": KeyboardInput.Z,
"Apostrophe": KeyboardInput.APOSTROPHE,
"Backslash": KeyboardInput.BACKSLASH,
"Backspace": KeyboardInput.BACKSPACE,
"CapsLock": KeyboardInput.CAPS_LOCK,
"Comma": KeyboardInput.COMMA,
"Del": KeyboardInput.DEL,
"Down": KeyboardInput.DOWN,
"End": KeyboardInput.END,
"Enter": KeyboardInput.ENTER,
"Equal": KeyboardInput.EQUAL,
"Escape": KeyboardInput.ESCAPE,
"F1": KeyboardInput.F1,
"F10": KeyboardInput.F10,
"F11": KeyboardInput.F11,
"F12": KeyboardInput.F12,
"F2": KeyboardInput.F2,
"F3": KeyboardInput.F3,
"F4": KeyboardInput.F4,
"F5": KeyboardInput.F5,
"F6": KeyboardInput.F6,
"F7": KeyboardInput.F7,
"F8": KeyboardInput.F8,
"F9": KeyboardInput.F9,
"GraveAccent": KeyboardInput.GRAVE_ACCENT,
"Home": KeyboardInput.HOME,
"Insert": KeyboardInput.INSERT,
"Key0": KeyboardInput.KEY_0,
"Key1": KeyboardInput.KEY_1,
"Key2": KeyboardInput.KEY_2,
"Key3": KeyboardInput.KEY_3,
"Key4": KeyboardInput.KEY_4,
"Key5": KeyboardInput.KEY_5,
"Key6": KeyboardInput.KEY_6,
"Key7": KeyboardInput.KEY_7,
"Key8": KeyboardInput.KEY_8,
"Key9": KeyboardInput.KEY_9,
"Left": KeyboardInput.LEFT,
"LeftAlt": KeyboardInput.LEFT_ALT,
"LeftBracket": KeyboardInput.LEFT_BRACKET,
"LeftControl": KeyboardInput.LEFT_CONTROL,
"LeftShift": KeyboardInput.LEFT_SHIFT,
"LeftSuper": KeyboardInput.LEFT_SUPER,
"Menu": KeyboardInput.MENU,
"Minus": KeyboardInput.MINUS,
"NumLock": KeyboardInput.NUM_LOCK,
"Numpad0": KeyboardInput.NUMPAD_0,
"Numpad1": KeyboardInput.NUMPAD_1,
"Numpad2": KeyboardInput.NUMPAD_2,
"Numpad3": KeyboardInput.NUMPAD_3,
"Numpad4": KeyboardInput.NUMPAD_4,
"Numpad5": KeyboardInput.NUMPAD_5,
"Numpad6": KeyboardInput.NUMPAD_6,
"Numpad7": KeyboardInput.NUMPAD_7,
"Numpad8": KeyboardInput.NUMPAD_8,
"Numpad9": KeyboardInput.NUMPAD_9,
"NumpadAdd": KeyboardInput.NUMPAD_ADD,
"NumpadDel": KeyboardInput.NUMPAD_DEL,
"NumpadDivide": KeyboardInput.NUMPAD_DIVIDE,
"NumpadEnter": KeyboardInput.NUMPAD_ENTER,
"NumpadEqual": KeyboardInput.NUMPAD_EQUAL,
"NumpadMultiply": KeyboardInput.NUMPAD_MULTIPLY,
"NumpadSubtract": KeyboardInput.NUMPAD_SUBTRACT,
"PageDown": KeyboardInput.PAGE_DOWN,
"PageUp": KeyboardInput.PAGE_UP,
"Pause": KeyboardInput.PAUSE,
"Period": KeyboardInput.PERIOD,
"PrintScreen": KeyboardInput.PRINT_SCREEN,
"Right": KeyboardInput.RIGHT,
"RightAlt": KeyboardInput.RIGHT_ALT,
"RightBracket": KeyboardInput.RIGHT_BRACKET,
"RightControl": KeyboardInput.RIGHT_CONTROL,
"RightShift": KeyboardInput.RIGHT_SHIFT,
"RightSuper": KeyboardInput.RIGHT_SUPER,
"ScrollLock": KeyboardInput.SCROLL_LOCK,
"Semicolon": KeyboardInput.SEMICOLON,
"Slash": KeyboardInput.SLASH,
"Space": KeyboardInput.SPACE,
"Tab": KeyboardInput.TAB,
"Up": KeyboardInput.UP,
}
# Define a list of all possible keyboard event types (for convenience).
# We won't consider KEY_REPEAT and CHAR events here.
keyboard_event_types = [
KeyboardEventType.KEY_PRESS,
KeyboardEventType.KEY_RELEASE,
]
# Create a list of all possible keyboard modifier combinations. Don't need permutations
# since bitwise OR operator is commutative and associative.
modifier_combinations = [
0, # 0
carb.input.KEYBOARD_MODIFIER_FLAG_SHIFT, # 1
carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, # 2
carb.input.KEYBOARD_MODIFIER_FLAG_SHIFT | carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, # 3
carb.input.KEYBOARD_MODIFIER_FLAG_ALT, # 4
carb.input.KEYBOARD_MODIFIER_FLAG_SHIFT | carb.input.KEYBOARD_MODIFIER_FLAG_ALT, # 5
carb.input.KEYBOARD_MODIFIER_FLAG_ALT | carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, # 6
carb.input.KEYBOARD_MODIFIER_FLAG_SHIFT
| carb.input.KEYBOARD_MODIFIER_FLAG_ALT
| carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, # 7
]
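        # e.g. index 3 == SHIFT | CONTROL, which corresponds to the node input combination
        # inputs:shiftIn=True, inputs:ctrlIn=True, inputs:altIn=False in the list below.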
# Create a list of tuples of lists of tuples and indices of all possible input key modifier
# settings on the OnKeyboardInput node.
node_input_modifier_attribute_combinations = [
([("inputs:shiftIn", False), ("inputs:altIn", False), ("inputs:ctrlIn", False)], 0),
([("inputs:shiftIn", True), ("inputs:altIn", False), ("inputs:ctrlIn", False)], 1),
([("inputs:shiftIn", False), ("inputs:altIn", False), ("inputs:ctrlIn", True)], 2),
([("inputs:shiftIn", True), ("inputs:altIn", False), ("inputs:ctrlIn", True)], 3),
([("inputs:shiftIn", False), ("inputs:altIn", True), ("inputs:ctrlIn", False)], 4),
([("inputs:shiftIn", True), ("inputs:altIn", True), ("inputs:ctrlIn", False)], 5),
([("inputs:shiftIn", False), ("inputs:altIn", True), ("inputs:ctrlIn", True)], 6),
([("inputs:shiftIn", True), ("inputs:altIn", True), ("inputs:ctrlIn", True)], 7),
]
# NOTE: Although the below test confirms that the OnKeyboardInput node works for all
# input combinations, it takes a while to run and times out the test (which is typically
# set to automatically crash after 300s). To account for this, each time we run the test
# a random subset of allowed input tokens and input keys are chosen with which
# we perform the test; this cuts down on computation time and still provides some
# decent code coverage.
# Wrap main test driver code in the following method. Note that in addition to testing
# whether specific buttons activate their corresponding code path, we also check that
# no other buttons can activate code paths they don't belong to. Check for all possible
# key + special modifier press/release permutations.
async def _test_onkeyboardinput_node(
quick_run: bool = True, num_keys_to_test: int = 10, num_modifiers_to_test: int = 1
):
_possible_key_inputs = []
_allowed_token_key_inputs = {}
_modifier_combinations = []
_node_input_modifier_attribute_combinations = []
# Codepath for accelerated test (won't time out and still provide some code coverage).
if quick_run:
# Make sure that the input flags make sense.
if num_keys_to_test < 1:
num_keys_to_test = 1
elif num_keys_to_test > 105:
num_keys_to_test = 105
if num_modifiers_to_test < 1:
num_modifiers_to_test = 1
elif num_modifiers_to_test > 8:
num_modifiers_to_test = 8
# Define three sets of random indices that'll determine the combination of inputs:keyIn
# tokens, emulated key inputs, and modifier values that we'll test for the current function call.
# Note that the inputs:keyIn tokens we choose and the emulated keys need not coincide, resulting
# in no output being generated by the OnKeyboardInput node.
rand_indices_0 = set()
while len(rand_indices_0) < num_keys_to_test:
rand_indices_0.add(random.randrange(len(possible_key_inputs)))
rand_indices_1 = set()
while len(rand_indices_1) < num_keys_to_test:
rand_indices_1.add(random.randrange(len(allowed_token_key_inputs)))
rand_indices_2 = set()
while len(rand_indices_2) < num_modifiers_to_test:
rand_indices_2.add(random.randrange(len(modifier_combinations)))
# Convert the sets into lists so that their elements can be accessible by index.
rand_indices_0 = list(rand_indices_0)
rand_indices_1 = list(rand_indices_1)
rand_indices_2 = list(rand_indices_2)
# Create an abbreviated list of possible key inputs.
for i in range(0, num_keys_to_test):
_possible_key_inputs.append(possible_key_inputs[rand_indices_0[i]])
# Create an abbreviated dict of allowed token-key input pairs.
temp_keys_list = list(allowed_token_key_inputs)
temp_values_list = list(allowed_token_key_inputs.values())
for i in range(0, num_keys_to_test):
_allowed_token_key_inputs[temp_keys_list[rand_indices_1[i]]] = temp_values_list[rand_indices_1[i]]
                # Create abbreviated lists of modifier values and corresponding input attribute-value pairs.
for rand_idx in rand_indices_2:
_modifier_combinations.append(modifier_combinations[rand_idx])
_node_input_modifier_attribute_combinations.append(
node_input_modifier_attribute_combinations[rand_idx]
)
# Codepath for full test.
else:
_possible_key_inputs = possible_key_inputs
_allowed_token_key_inputs = allowed_token_key_inputs
_modifier_combinations = modifier_combinations
_node_input_modifier_attribute_combinations = node_input_modifier_attribute_combinations
            # Loop through each possible inputs:keyIn token, and then set the inputs:keyIn
            # attribute on the OnKeyboardInput node.
for token, _ in _allowed_token_key_inputs.items(): # noqa PLR1702
in_keyin_attr.set(token)
                # Loop through each possible modifier combination, and set the corresponding input
                # attributes on the OnKeyboardInput node. Also store an index representing the
                # current modifier state.
for node_input_modifier_attribute_tuple in _node_input_modifier_attribute_combinations:
node_input_modifier_attribute_list_in_tuple = node_input_modifier_attribute_tuple[0]
for input_attr_value_modifier_pair in node_input_modifier_attribute_list_in_tuple:
og.Controller.set(
og.Controller.attribute(input_attr_value_modifier_pair[0], on_keyboard_input_node),
input_attr_value_modifier_pair[1],
)
# Loop through each possible input key.
for key in _possible_key_inputs:
                        # Loop through all possible modifier combinations.
for modifier in _modifier_combinations:
# Loop through each possible keyboard event type.
for event_type in keyboard_event_types:
# Trigger the current keyboard event.
input_provider.buffer_keyboard_key_event(keyboard, event_type, key, modifier)
await app.next_update_async()
# If the currently-pressed key matches the currently-set inputs:keyIn token's
# corresponding KeyboardInput + modifiers, check that the OnKeyboardInput node gets
# correctly activated.
if (
_allowed_token_key_inputs[token] == key # noqa PLR1733
and node_input_modifier_attribute_tuple[1] == modifier
):
# If the key has been pressed, check for the corresponding expected conditions.
if event_type == KeyboardEventType.KEY_PRESS:
self.assertEqual(out_keyout_attr.get(), token)
self.assertEqual(out_pressed_attr.get(), self.E)
self.assertEqual(out_released_attr.get(), self.D)
self.assertTrue(out_ispressed_attr.get())
# If the key has been released, check for the corresponding expected conditions.
elif event_type == KeyboardEventType.KEY_RELEASE:
self.assertEqual(out_keyout_attr.get(), token)
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.E)
self.assertFalse(out_ispressed_attr.get())
else:
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.D)
self.assertFalse(out_ispressed_attr.get())
# Test that the OnKeyboardInput node works correctly when its onlyPlayback input is disabled.
in_playbackonly_attr.set(False)
await _test_onkeyboardinput_node()
# Test that the OnKeyboardInput node works correctly when its onlyPlayback input is enabled.
in_playbackonly_attr.set(True)
timeline = omni.timeline.get_timeline_interface()
timeline.set_target_framerate(timeline.get_time_codes_per_seconds())
timeline.play()
await _test_onkeyboardinput_node()
timeline.stop()
# ----------------------------------------------------------------------
# NOTE: Even though the OnLoaded node is threadsafe (its compute method is very simple),
# we don't adapt the below test to check for thread-safety conditions because it relies
# on other nodes (omni.graph.action.SendCustomEvent) which are NOT threadsafe.
async def test_onloaded_node(self):
"""Test OnLoaded node"""
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
name = "omni.graph.action." + event_name
return carb.events.type_from_string(name)
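        # For example, registered_event_name("foo") yields the carb event type that a
        # SendCustomEvent node pushes for inputs:eventName "foo", which is what the
        # subscription below listens for.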
events = []
def on_event(event):
events.append(event.payload["!path"])
reg_event_name = registered_event_name("foo")
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
sub = message_bus.create_subscription_to_push_by_type(reg_event_name, on_event)
self.assertIsNotNone(sub)
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("OnLoaded", "omni.graph.action.OnLoaded"),
("Send1", "omni.graph.action.SendCustomEvent"),
("Send2", "omni.graph.action.SendCustomEvent"),
],
self.keys.CONNECT: [
("OnLoaded.outputs:execOut", "Send1.inputs:execIn"),
("OnTick.outputs:tick", "Send2.inputs:execIn"),
],
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Send1.inputs:eventName", "foo"),
("Send2.inputs:eventName", "foo"),
("Send1.inputs:path", "Loaded"),
("Send2.inputs:path", "Tick"),
],
},
)
# Evaluate once so that graph is in steady state.
await og.Controller.evaluate()
        # Verify that the "Loaded" event arrived before the "Tick" event.
self.assertListEqual(events, ["Loaded", "Tick"])
# ----------------------------------------------------------------------
async def test_onmessagebusevent_node(self):
"""Test OnMessageBusEvent node"""
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
(_, (on_custom_event, _), _, _,) = og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnCustomEvent", "omni.graph.action.OnMessageBusEvent"),
("Counter1", "omni.graph.action.Counter"),
],
self.keys.CONNECT: [
("OnCustomEvent.outputs:execOut", "Counter1.inputs:execIn"),
],
self.keys.SET_VALUES: [
("OnCustomEvent.inputs:onlyPlayback", False),
("OnCustomEvent.inputs:eventName", "testEvent"),
],
},
)
# One compute for the first-time subscribe.
await omni.kit.app.get_app().next_update_async()
        def get_all_supported_types() -> List[str]:
            """Helper to get all the types supported by the node"""
            types = []
            for attr_type in ogn.supported_attribute_type_names():
                if (
                    attr_type in ("any", "bundle", "target", "execution")
                    or attr_type.startswith("transform")
                    or attr_type.startswith("objectId")
                    or attr_type.startswith("frame")
                ):
                    continue
                types.append(attr_type)
            return types
        def assert_are_equal(expected_val, val):
            """Helper to assert two values are equal; sequence container types need not match"""
            if isinstance(expected_val, (list, tuple, np.ndarray)):
                # Recurse element-wise so that e.g. a tuple compares equal to an ndarray.
                self.assertEqual(len(expected_val), len(val))
                for left, right in zip(expected_val, val):
                    assert_are_equal(left, right)
            elif isinstance(val, np.ndarray):
                self.assertListEqual(expected_val, list(val))
            else:
                self.assertEqual(expected_val, val)
            return True
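        # e.g. assert_are_equal((1.0, 2.0), np.array([1.0, 2.0])) passes: sequences are
        # compared element-wise, so the container types need not match.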
msg = carb.events.type_from_string("testEvent")
payload = {}
expected_vals = []
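        # The behavior under test: for each entry in the pushed payload, OnMessageBusEvent is
        # expected to copy the value into the dynamic output attribute of the same name that
        # we create below, so a round-trip through the message bus preserves the sample value.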
for sup_type in sorted(get_all_supported_types()):
payload = {}
name = sup_type.replace("[", "_").replace("]", "_")
manager = ogn.get_attribute_manager_type(sup_type)
# Create a dynamic output attribute on the node which matches the type of the test payload.
og_type = og.AttributeType.type_from_ogn_type_name(sup_type)
attrib = og.Controller.create_attribute(
on_custom_event, f"outputs:{name}", sup_type, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT
)
# Get a sample value in python format (nested tuples/lists).
sample_val = manager.sample_values()[0]
is_array = og_type.array_depth > 0
is_tuple = og_type.tuple_count > 1
is_matrix = is_tuple and (
og_type.role in (og.AttributeRole.FRAME, og.AttributeRole.MATRIX, og.AttributeRole.TRANSFORM)
)
payload_val = sample_val
# Convert the sample value into numpy format for use with OG API.
if is_array:
if is_matrix:
                    payload_val = np.array(sample_val).flatten().tolist()
elif is_tuple:
payload_val = np.array(sample_val).flatten().tolist()
elif is_matrix:
payload_val = np.array(sample_val).flatten().tolist()
payload[name] = payload_val
expected_vals.append((attrib, sample_val))
# Push the message to kit message bus.
omni.kit.app.get_app().get_message_bus_event_stream().push(msg, payload=payload)
# Wait for one kit update to allow the event-push mechanism to trigger the node callback.
await omni.kit.app.get_app().next_update_async()
# Verify the value.
out_val = og.Controller.get(attrib)
try:
                assert_are_equal(sample_val, out_val)
except AssertionError as exc:
raise AssertionError(f"{sample_val} != {out_val}") from exc
| 43,381 | Python | 49.561772 | 131 | 0.577234 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_evaluation2.py | """Action Graph Evaluation Tests Part 2"""
import json
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.usd
# ======================================================================
class TestActionGraphEvaluation2(ogts.OmniGraphTestCase):
"""Tests action graph evaluator functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ----------------------------------------------------------------------
async def test_retrigger_latent(self):
"""Test that latent nodes can be re-triggered"""
want_debug = False
e_state = og.ExecutionAttributeState
tick_count = 0
boop_count = 0
exec_in_latent_count = 0
max_ticks = 20
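        # A sketch of the execution-state protocol exercised below, as this test understands
        # it: setting an output to LATENT_PUSH puts the node into a latent state so the
        # evaluator ticks it on every subsequent evaluation; plain ENABLED triggers downstream
        # nodes for the current evaluation only; LATENT_FINISH ends the latent state and fires
        # the corresponding downstream network one final time.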
class CancelTickerPy:
"""Helper node type which does latent ticking and can be canceled, and has an independent counter "boop" """
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
nonlocal tick_count
nonlocal boop_count
nonlocal exec_in_latent_count
exec_in = node.get_attribute("inputs:execIn")
exec_in_val = og.Controller.get(exec_in)
cancel = node.get_attribute("inputs:cancel")
cancel_val = og.Controller.get(cancel)
boop = node.get_attribute("inputs:boop")
boop_val = og.Controller.get(boop)
want_debug and print(
f"### {tick_count} execIn={exec_in_val} cancel={cancel_val} boop={boop_val}"
) # noqa: expression-not-assigned
if cancel_val == e_state.ENABLED:
# Finish latent by cancel
og.Controller.set(node.get_attribute("outputs:canceled"), e_state.LATENT_FINISH)
self.assertEqual(exec_in_val, e_state.DISABLED)
self.assertEqual(boop_val, e_state.DISABLED)
tick_count = 0
return True
if exec_in_val == e_state.ENABLED:
self.assertEqual(cancel_val, e_state.DISABLED)
self.assertEqual(boop_val, e_state.DISABLED)
if tick_count > 0:
# execIn triggered while in latent - should not be possible
exec_in_latent_count += 1
else:
og.Controller.set(node.get_attribute("outputs:tick"), e_state.LATENT_PUSH)
return True
# we are ticking
self.assertEqual(cancel_val, e_state.DISABLED)
tick_count += 1
if tick_count < max_ticks:
og.Controller.set(node.get_attribute("outputs:tick"), e_state.ENABLED)
else:
# Finish latent naturally
og.Controller.set(node.get_attribute("outputs:execOut"), e_state.LATENT_FINISH)
tick_count = 0
if boop_val == e_state.ENABLED:
# We get here during latent ticking, if the boop input is enabled
self.assertEqual(exec_in_val, e_state.DISABLED)
self.assertEqual(cancel_val, e_state.DISABLED)
boop_count += 1
return True
@staticmethod
def get_node_type() -> str:
return "omni.graph.test.CancelTickerPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
node_type.add_input(
"inputs:execIn",
"execution",
True,
)
node_type.add_input(
"inputs:cancel",
"execution",
True,
)
node_type.add_input(
"inputs:boop",
"execution",
True,
)
node_type.add_output("outputs:tick", "execution", True)
node_type.add_output("outputs:canceled", "execution", True)
node_type.add_output("outputs:execOut", "execution", True)
return True
og.register_node_type(CancelTickerPy, 1)
controller = og.Controller()
keys = og.Controller.Keys
(graph, (ticker, start, _, cancel, boop, _, _, counter), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("Ticker", "omni.graph.test.CancelTickerPy"),
("Start", "omni.graph.action.OnImpulseEvent"),
("Start2", "omni.graph.action.OnImpulseEvent"),
("Cancel", "omni.graph.action.OnImpulseEvent"),
("Boop", "omni.graph.action.OnImpulseEvent"),
("Once", "omni.graph.action.Once"),
("Once2", "omni.graph.action.Once"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("Start.outputs:execOut", "Ticker.inputs:execIn"),
("Start2.outputs:execOut", "Ticker.inputs:execIn"),
("Cancel.outputs:execOut", "Once.inputs:execIn"),
("Once.outputs:once", "Ticker.inputs:cancel"),
("Once.outputs:after", "Ticker.inputs:cancel"),
("Cancel.outputs:execOut", "Once2.inputs:execIn"),
("Boop.outputs:execOut", "Ticker.inputs:boop"),
("Once2.outputs:once", "Ticker.inputs:cancel"),
("Once2.outputs:after", "Ticker.inputs:cancel"),
("Ticker.outputs:tick", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("Start.inputs:onlyPlayback", False),
("Start2.inputs:onlyPlayback", False),
("Cancel.inputs:onlyPlayback", False),
("Boop.inputs:onlyPlayback", False),
],
},
)
# cancel, check nothing happens
og.Controller.set(controller.attribute("state:enableImpulse", cancel), True)
await controller.evaluate(graph)
exec_out = og.Controller.get(controller.attribute("outputs:tick", ticker))
self.assertEqual(exec_out, e_state.DISABLED)
# start ticking
og.Controller.set(controller.attribute("state:enableImpulse", start), True)
await controller.evaluate(graph) # Starts latent state
await controller.evaluate(graph) # Tick 1
self.assertEqual(tick_count, 1)
# Verify the tick has started
exec_out = og.Controller.get(controller.attribute("outputs:tick", ticker))
self.assertEqual(exec_out, e_state.ENABLED)
await controller.evaluate(graph) # Tick 2
self.assertEqual(tick_count, 2)
exec_out = og.Controller.get(controller.attribute("outputs:tick", ticker))
self.assertEqual(exec_out, e_state.ENABLED)
await controller.evaluate(graph) # Tick 3
self.assertEqual(tick_count, 3)
# Boop - node keeps ticking
og.Controller.set(controller.attribute("state:enableImpulse", boop), True)
        # Boop triggers a compute which increments boop_count; the normal latent tick also
        # fires in the same evaluation, so tick_count advances by 2
await controller.evaluate(graph)
self.assertEqual(boop_count, 1)
self.assertEqual(tick_count, 5)
# Now check that the next tick can run WITHOUT inputs:boop being high
await controller.evaluate(graph)
self.assertEqual(boop_count, 1) # No change in boop count (OM-64856)
self.assertEqual(tick_count, 6)
# Now check that we can't re-trigger execIn
self.assertEqual(exec_in_latent_count, 0)
og.Controller.set(controller.attribute("state:enableImpulse", start), True)
# Start will not trigger any compute because the node is latent
await controller.evaluate(graph)
self.assertEqual(exec_in_latent_count, 0)
self.assertEqual(boop_count, 1)
self.assertEqual(tick_count, 7)
# Now check the normal tick proceeds as normal
await controller.evaluate(graph)
self.assertEqual(boop_count, 1)
self.assertEqual(tick_count, 8)
# Cancel
counter_attr = controller.attribute("outputs:count", counter)
count_0 = og.Controller.get(counter_attr)
og.Controller.set(controller.attribute("state:enableImpulse", cancel), True)
await controller.evaluate(graph) # latent finish
await controller.evaluate(graph) # no action
await controller.evaluate(graph) # no action
count_1 = og.Controller.get(counter_attr)
self.assertEqual(count_0 + 1, count_1)
# ----------------------------------------------------------------------
async def test_cycle_break(self):
"""test that an illegal cycle issues a warning"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (on_impulse, count_a, count_b), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("A", "omni.graph.action.Counter"),
("B", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "A.inputs:execIn"),
("A.outputs:execOut", "B.inputs:execIn"),
("B.outputs:execOut", "A.inputs:execIn"),
],
keys.SET_VALUES: [
("OnImpulse.state:enableImpulse", True),
("OnImpulse.inputs:onlyPlayback", False),
],
},
)
with ogts.ExpectedError():
await controller.evaluate(graph)
og.Controller.set(controller.attribute("state:enableImpulse", on_impulse), True)
with ogts.ExpectedError():
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", count_a)), 2)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", count_b)), 2)
# ----------------------------------------------------------------------
async def test_dep_sort_fan_out(self):
"""Test that dependency sort works when there is data fan-out"""
# +-------------+
# +-------->| |
# | | SwitchTokenA|
# | +--->+-------------+
# +----------+ |
# |OnImpulse + | +--------------+
# +----------+ | | SwitchTokenB |
# | +^-------------+
# +------+-+ +--------+ |
# | ConstA +--->AppendB +---+
# +--------+ +--------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, const_a, _, switch_a, _), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ConstA", "omni.graph.nodes.ConstantToken"),
("AppendB", "omni.graph.nodes.AppendString"),
("SwitchTokenA", "omni.graph.action.SwitchToken"),
("SwitchTokenB", "omni.graph.action.SwitchToken"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "SwitchTokenA.inputs:execIn"),
("ConstA.inputs:value", "SwitchTokenA.inputs:value"),
("ConstA.inputs:value", "AppendB.inputs:value"),
("AppendB.outputs:value", "SwitchTokenB.inputs:value"),
],
keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
("AppendB.inputs:suffix", {"value": "Foo", "type": "token"}),
],
},
)
await controller.evaluate(graph)
graph_state = og.OmniGraphInspector().as_json(graph, flags=["evaluation"])
graph_state_obj = json.loads(graph_state)
trace = graph_state_obj["Evaluator"]["LastNonEmptyEvaluation"]["Trace"]
# Verify the evaluation trace includes exactly what we expect
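        # Only ConstA and SwitchTokenA should be traced: AppendB and SwitchTokenB consume the
        # shared ConstA data but are not dependencies of the executed path, so the dependency
        # sort must leave them out.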
expected_trace = [
const_a.get_prim_path(),
switch_a.get_prim_path(),
]
self.assertListEqual(expected_trace, trace)
# ----------------------------------------------------------------------
async def test_exec_fan_out_shared_deps(self):
"""Test that dependency sort works when there is shared data in exec fan-out"""
# +---------+
# +---------->| Write1 |
# | +----^----+
# | |
# | +----------+
# | |
# +-----------+ | |
# | OnImpulse +-----+-----+----> +---------+
# +-----------+ | | | Write2 |
# | +----->+---------+
# | |
# | | +---------+
# +-----+----->| Write3 |
# | +---------+
# | ^
# +-------+ +---+----+---+
# | Const +----->| Inc |
# +-------+ +--------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, _, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Const", "omni.graph.nodes.ConstantDouble"),
("Inc", "omni.graph.nodes.Increment"),
("Write1", "omni.graph.nodes.WritePrimAttribute"),
("Write2", "omni.graph.nodes.WritePrimAttribute"),
("Write3", "omni.graph.nodes.WritePrimAttribute"),
],
keys.CREATE_PRIMS: [
("/World/TestPrim1", {"val": ("double", 1.0)}),
("/World/TestPrim2", {"val": ("double", 2.0)}),
("/World/TestPrim3", {"val": ("double", 3.0)}),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "Write1.inputs:execIn"),
("OnImpulse.outputs:execOut", "Write2.inputs:execIn"),
("OnImpulse.outputs:execOut", "Write3.inputs:execIn"),
("Const.inputs:value", "Inc.inputs:value"),
("Inc.outputs:result", "Write1.inputs:value"),
("Inc.outputs:result", "Write2.inputs:value"),
("Inc.outputs:result", "Write3.inputs:value"),
],
keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
("Const.inputs:value", 41.0),
("Inc.inputs:increment", 1.0),
("Write1.inputs:primPath", "/World/TestPrim1"),
("Write1.inputs:usePath", True),
("Write1.inputs:name", "val"),
("Write2.inputs:primPath", "/World/TestPrim2"),
("Write2.inputs:usePath", True),
("Write2.inputs:name", "val"),
("Write3.inputs:primPath", "/World/TestPrim3"),
("Write3.inputs:usePath", True),
("Write3.inputs:name", "val"),
],
},
)
await controller.evaluate(graph)
stage = omni.usd.get_context().get_stage()
for i in (1, 2, 3):
self.assertEqual(stage.GetAttributeAtPath(f"/World/TestPrim{i}.val").Get(), 42.0)
# ----------------------------------------------------------------------
async def test_exec_fan_out_shared_deps2(self):
"""Test that dependency sort works when there is shared data in exec fan-out"""
# ┌───────┐ ┌────────┐
# │Const1 ├───────────►│Append1 │
# └───────┘ │ ├──────────►┌───────────┐
# ┌──►└────────┘ │ WriteVar1 │
# ┌─────────────┐ │ ┌────►└───────────┘
# │ GraphTarget ├───┤ │
# └─────────────┘ └─►┌─────────┐ │ ┌───────────┐
# │ Append2 ├─────┼────►│ WriteVar2 │
# ┌────────┐ ┌─►└─────────┘ │ └───────────┘
# │ Const2 ├────────┘ │ ▲
# └────────┘ │ │
# │ │
# ┌──────────┐ │ │
# │ OnImpulse├────┴──────┘
# └──────────┘
controller = og.Controller()
keys = og.Controller.Keys
(graph, _, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Const1", "omni.graph.nodes.ConstantToken"),
("Const2", "omni.graph.nodes.ConstantToken"),
("Append1", "omni.graph.nodes.AppendPath"),
("Append2", "omni.graph.nodes.AppendPath"),
("GraphTarget", "omni.graph.nodes.GraphTarget"),
("WriteVar1", "omni.graph.core.WriteVariable"),
("WriteVar2", "omni.graph.core.WriteVariable"),
],
keys.CREATE_VARIABLES: [
("path1", og.Type(og.BaseDataType.TOKEN)),
("path2", og.Type(og.BaseDataType.TOKEN)),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "WriteVar1.inputs:execIn"),
("OnImpulse.outputs:execOut", "WriteVar2.inputs:execIn"),
("GraphTarget.outputs:primPath", "Append1.inputs:path"),
("GraphTarget.outputs:primPath", "Append2.inputs:path"),
("Const1.inputs:value", "Append1.inputs:suffix"),
("Const2.inputs:value", "Append2.inputs:suffix"),
("Append1.outputs:path", "WriteVar1.inputs:value"),
("Append2.outputs:path", "WriteVar2.inputs:value"),
],
keys.SET_VALUES: [
("WriteVar1.inputs:variableName", "path1"),
("WriteVar2.inputs:variableName", "path2"),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
("Const1.inputs:value", "A"),
("Const2.inputs:value", "B"),
],
},
)
await controller.evaluate(graph)
context = graph.get_default_graph_context()
graph_path = self.TEST_GRAPH_PATH
variable = graph.find_variable("path1")
        self.assertEqual(variable.get(context), f"{graph_path}/A")
        variable = graph.find_variable("path2")
        self.assertEqual(variable.get(context), f"{graph_path}/B")
| 19,995 | Python | 45.719626 | 120 | 0.464766 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_nodes_03.py | """Action Graph Node Tests, Part 3"""
import random
from functools import partial
import carb.input
import omni.appwindow
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.app
import omni.kit.commands
import omni.kit.test
import omni.timeline
import omni.usd
from carb.input import MouseEventType
from omni.graph.core import ThreadsafetyTestUtils
from pxr import Gf, OmniGraphSchemaTools, Sdf
# ======================================================================
class TestActionGraphNodes(ogts.OmniGraphTestCase):
"""Tests action graph node functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
keys = og.Controller.Keys
E = og.ExecutionAttributeState.ENABLED
D = og.ExecutionAttributeState.DISABLED
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_onmouseinput_node(self, test_instance_id: int = 0):
"""Test OnMouseInput node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (on_mouse_input_node,), _, _) = og.Controller.edit(
graph_path,
{self.keys.CREATE_NODES: ("OnMouseInput", "omni.graph.action.OnMouseInput")},
)
# Obtain necessary attributes.
in_onlyplayback_attr = on_mouse_input_node.get_attribute("inputs:onlyPlayback")
in_mouse_element_attr = on_mouse_input_node.get_attribute("inputs:mouseElement")
out_pressed_attr = on_mouse_input_node.get_attribute("outputs:pressed")
out_released_attr = on_mouse_input_node.get_attribute("outputs:released")
out_valuechanged_attr = on_mouse_input_node.get_attribute("outputs:valueChanged")
out_ispressed_attr = on_mouse_input_node.get_attribute("outputs:isPressed")
out_value_attr = on_mouse_input_node.get_attribute("outputs:value")
# Obtain an interface to the mouse and carb input provider.
mouse = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, omni.appwindow.get_default_app_window().get_mouse()
)
input_provider = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, carb.input.acquire_input_provider()
)
# Define a list of tokens representing the possible inputs to the OnMouseInput node's "mouseElement" attribute.
mouse_tokens = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id,
["Left Button", "Middle Button", "Right Button", "Normalized Move", "Pixel Move", "Scroll"],
)
# Define a list of all possible mouse event types (for convenience).
mouse_event_types = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id,
[
MouseEventType.LEFT_BUTTON_DOWN,
MouseEventType.LEFT_BUTTON_UP,
MouseEventType.MIDDLE_BUTTON_DOWN,
MouseEventType.MIDDLE_BUTTON_UP,
MouseEventType.RIGHT_BUTTON_DOWN,
MouseEventType.RIGHT_BUTTON_UP,
MouseEventType.MOVE,
MouseEventType.SCROLL,
],
)
# Define some imaginary window dimensions for testing purposes.
window_width = ThreadsafetyTestUtils.add_to_threading_cache(test_instance_id, 1440)
window_height = ThreadsafetyTestUtils.add_to_threading_cache(test_instance_id, 720)
# Wrap main test driver code in the following generator (to leverage the
# ThreadsafetyTestUtils.make_threading_test decorator to its fullest). Note that
# in addition to testing whether specific buttons activate their corresponding code
# path, we also check that no other buttons can activate code paths they don't belong
# to (e.g. that pressing the left mouse button doesn't get registered as a button
# press when "inputs:mouseElement" is set to "Right Button").
def _test_onmouseinput_node():
# Loop through each possible "inputs.mouseElement" token, and set the input attribute
# on the OnMouseInput nodes in each graph.
for token in mouse_tokens:
in_mouse_element_attr.set(token)
# Loop through each possible mouse event type.
for event_type in mouse_event_types:
# Generate a new random test position for the mouse cursor. Note that we
# add it to the threading cache so that each test graph instance runs
# its tests/comparisons against the same randomly-generated position.
pos = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, (random.randint(0, window_width), random.randint(0, window_height))
)
# Trigger the current mouse event.
input_provider.buffer_mouse_event(
mouse, event_type, (pos[0] / window_width, pos[1] / window_height), 0, pos
)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
# Test left/middle/right mouse button pressed functionality. Only left/middle/right
# mouse button presses should activate this codepath.
if (
(event_type == MouseEventType.LEFT_BUTTON_DOWN and token == "Left Button")
or (event_type == MouseEventType.MIDDLE_BUTTON_DOWN and token == "Middle Button")
or (event_type == MouseEventType.RIGHT_BUTTON_DOWN and token == "Right Button")
):
self.assertEqual(out_pressed_attr.get(), self.E)
self.assertEqual(out_released_attr.get(), self.D)
self.assertEqual(out_valuechanged_attr.get(), self.D)
self.assertTrue(out_ispressed_attr.get())
self.assertEqual(len(out_value_attr.get()), 2)
self.assertEqual(out_value_attr.get()[0], 0)
self.assertEqual(out_value_attr.get()[1], 0)
# Test left/middle/right mouse button released functionality. Only left/middle/right mouse
# button releases should activate this codepath.
elif (
(event_type == MouseEventType.LEFT_BUTTON_UP and token == "Left Button")
or (event_type == MouseEventType.MIDDLE_BUTTON_UP and token == "Middle Button")
or (event_type == MouseEventType.RIGHT_BUTTON_UP and token == "Right Button")
):
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.E)
self.assertEqual(out_valuechanged_attr.get(), self.D)
self.assertFalse(out_ispressed_attr.get())
self.assertEqual(len(out_value_attr.get()), 2)
self.assertEqual(out_value_attr.get()[0], 0)
self.assertEqual(out_value_attr.get()[1], 0)
# Test mouse movement functionality with the "Normalized Move" option enabled. Only mouse movement
# should activate this codepath.
elif event_type == MouseEventType.MOVE and token == "Normalized Move":
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.D)
self.assertEqual(out_valuechanged_attr.get(), self.E)
self.assertFalse(out_ispressed_attr.get())
self.assertEqual(len(out_value_attr.get()), 2)
self.assertAlmostEqual(out_value_attr.get()[0], pos[0] / window_width)
self.assertAlmostEqual(out_value_attr.get()[1], pos[1] / window_height)
# Test mouse movement functionality with the "Pixel Move" option enabled. Only mouse movement
# should activate this codepath.
elif event_type == MouseEventType.MOVE and token == "Pixel Move":
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.D)
self.assertEqual(out_valuechanged_attr.get(), self.E)
self.assertFalse(out_ispressed_attr.get())
self.assertEqual(len(out_value_attr.get()), 2)
self.assertEqual(out_value_attr.get()[0], pos[0])
self.assertEqual(out_value_attr.get()[1], pos[1])
# Test mouse scrolling functionality with the "Scroll" option enabled. Only mouse scrolling
# should activate this codepath.
elif event_type == MouseEventType.SCROLL and token == "Scroll":
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.D)
self.assertEqual(out_valuechanged_attr.get(), self.E)
self.assertFalse(out_ispressed_attr.get())
self.assertEqual(len(out_value_attr.get()), 2)
self.assertAlmostEqual(out_value_attr.get()[0], pos[0] / window_width)
self.assertAlmostEqual(out_value_attr.get()[1], pos[1] / window_height)
# Non-activated codepath.
else:
self.assertEqual(out_pressed_attr.get(), self.D)
self.assertEqual(out_released_attr.get(), self.D)
self.assertEqual(out_valuechanged_attr.get(), self.D)
self.assertFalse(out_ispressed_attr.get())
self.assertEqual(len(out_value_attr.get()), 2)
self.assertEqual(out_value_attr.get()[0], 0)
self.assertEqual(out_value_attr.get()[1], 0)
# Test that the OnMouseInput node works correctly when its onlyPlayback input is disabled.
in_onlyplayback_attr.set(False)
for _ in _test_onmouseinput_node():
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
# Test that the OnMouseInput node works correctly when its onlyPlayback input is enabled.
in_onlyplayback_attr.set(True)
timeline = omni.timeline.get_timeline_interface()
timeline.set_target_framerate(timeline.get_time_codes_per_seconds())
timeline.play()
for _ in _test_onmouseinput_node():
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
timeline.stop()
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_onobjectchange_node(self, test_instance_id: int = 0):
"""Test OnObjectChange node"""
usd_context = ThreadsafetyTestUtils.add_to_threading_cache(test_instance_id, omni.usd.get_context())
stage = ThreadsafetyTestUtils.add_to_threading_cache(test_instance_id, usd_context.get_stage())
cube = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, ogts.create_cube(stage, "Cube", (0.6, 0.4, 0.0))
)
attr_position = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, cube.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Float3, False)
)
attr_rotation = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, cube.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Float3, False)
)
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(graph, (on_object_change_node, flip_flop_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
(graph_path + "/OnObjectChange", "omni.graph.action.OnObjectChange"),
(graph_path + "/FlipFlop", "omni.graph.action.FlipFlop"),
],
self.keys.SET_VALUES: [
(graph_path + "/OnObjectChange.inputs:onlyPlayback", False),
(graph_path + "/OnObjectChange.inputs:prim", attr_position.GetPrimPath()),
(graph_path + "/OnObjectChange.inputs:name", attr_position.GetName()),
],
self.keys.CONNECT: (
graph_path + "/OnObjectChange.outputs:changed",
graph_path + "/FlipFlop.inputs:execIn",
),
},
)
outa = og.Controller.attribute("outputs:a", flip_flop_node)
# Check the current FlipFlop state, and that it isn't changing until we move the cube.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
ff_a_state_1 = og.Controller.get(outa)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(og.Controller.get(outa), ff_a_state_1)
# Try changing a different (non-translate) attribute - should not trigger.
ThreadsafetyTestUtils.single_evaluation_first_test_instance(
test_instance_id, partial(attr_rotation.Set, Gf.Vec3f(180.0, 0.0, 0.0))
)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(og.Controller.get(outa), ff_a_state_1)
# Now change the translate attribute - should trigger.
ThreadsafetyTestUtils.single_evaluation_first_test_instance(
test_instance_id, partial(attr_position.Set, Gf.Vec3f(1.0, 0.0, 0.0))
)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
ff_a_state_2 = og.Controller.get(outa)
self.assertEqual(ff_a_state_2, 1 if not ff_a_state_1 else 0)
property_path = og.Controller.get(og.Controller.attribute("outputs:propertyName", on_object_change_node))
self.assertEqual(property_path, attr_position.GetName())
# Look at the prim itself.
og.Controller.edit(
graph,
{self.keys.SET_VALUES: (graph_path + "/OnObjectChange.inputs:name", "")},
)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# Now change the rotate attribute - should trigger.
ThreadsafetyTestUtils.single_evaluation_first_test_instance(
test_instance_id, partial(attr_rotation.Set, Gf.Vec3f(245.0, 0.0, 0.0))
)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
ff_a_state_1 = og.Controller.get(outa)
self.assertEqual(ff_a_state_1, 1 if not ff_a_state_2 else 0)
property_path = og.Controller.get(og.Controller.attribute("outputs:propertyName", on_object_change_node))
self.assertEqual(property_path, attr_rotation.GetName())
# Now use the path input instead of the target. Do this for all OnObjectChange nodes!
og.Controller.edit(
graph,
{
self.keys.SET_VALUES: [
(graph_path + "/OnObjectChange.inputs:path", cube.GetPath().pathString),
(graph_path + "/OnObjectChange.inputs:prim", []),
]
},
)
# Compute once to set up the inputs.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# Change the rotate attribute - should trigger.
ThreadsafetyTestUtils.single_evaluation_first_test_instance(
test_instance_id, partial(attr_rotation.Set, Gf.Vec3f(0.0, 0.0, 0.0))
)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
ff_a_state_2 = og.Controller.get(outa)
self.assertEqual(ff_a_state_2, 1 if not ff_a_state_1 else 0)
property_path = og.Controller.get(og.Controller.attribute("outputs:propertyName", on_object_change_node))
self.assertEqual(property_path, attr_rotation.GetName())
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_onplaybacktick_node(self, test_instance_id: int = 0):
"""Test OnPlaybackTick node"""
timeline = omni.timeline.get_timeline_interface()
        # The stage's default timeCodesPerSecond is probably 60; set it to 30 for this specific case
stage = omni.usd.get_context().get_stage()
fps = 30.0
stage.SetTimeCodesPerSecond(fps)
stage.SetStartTimeCode(1.0)
stage.SetEndTimeCode(10.0)
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (on_p_tick_node, _), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnPTick", "omni.graph.action.OnPlaybackTick"),
("FlipFlop", "omni.graph.action.FlipFlop"),
],
self.keys.CONNECT: [
("OnPTick.outputs:tick", "FlipFlop.inputs:execIn"),
],
},
)
# Obtain necessary attributes.
tick_time_attr = on_p_tick_node.get_attribute("outputs:time")
tick_frame_attr = on_p_tick_node.get_attribute("outputs:frame")
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# Check that the OnPlaybackTick node doesn't trigger when playback is not active.
self.assertEqual(tick_time_attr.get(), 0)
self.assertEqual(tick_frame_attr.get(), 0)
# Check that the OnPlaybackTick node does trigger when playback is active, and the values are correct.
timeline.play()
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertAlmostEqual(tick_time_attr.get(), timeline.get_current_time(), places=5)
self.assertAlmostEqual(tick_frame_attr.get(), tick_time_attr.get() * fps, places=5)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_ontick_node(self, test_instance_id: int = 0):
"""Test OnTick node"""
timeline = omni.timeline.get_timeline_interface()
        # The stage's default timeCodesPerSecond is probably 60; set it to 30 for this specific case
stage = omni.usd.get_context().get_stage()
fps = 30.0
stage.SetTimeCodesPerSecond(fps)
stage.SetStartTimeCode(1.0)
stage.SetEndTimeCode(10.0)
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (on_tick_node, _), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FlipFlop", "omni.graph.action.FlipFlop"),
],
self.keys.CONNECT: [
("OnTick.outputs:tick", "FlipFlop.inputs:execIn"),
],
self.keys.SET_VALUES: ("OnTick.inputs:onlyPlayback", True),
},
)
# Obtain necessary attributes.
ontick_time_attr = on_tick_node.get_attribute("outputs:time")
ontick_frame_attr = on_tick_node.get_attribute("outputs:frame")
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# Check that the OnTick node doesn't trigger when playback is not active.
self.assertEqual(ontick_time_attr.get(), 0)
self.assertEqual(ontick_frame_attr.get(), 0)
# Check that the OnTick node does trigger when playback is active, and the values are correct.
timeline.play()
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertAlmostEqual(ontick_time_attr.get(), timeline.get_current_time(), places=5)
self.assertAlmostEqual(ontick_frame_attr.get(), ontick_time_attr.get() * fps, places=5)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_onvariablechange_node(self, test_instance_id: int = 0):
"""Test OnVariableChange node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(graph, (on_variable_change_node, counter_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnVariableChange", "omni.graph.action.OnVariableChange"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [
("OnVariableChange.inputs:onlyPlayback", False),
],
self.keys.CONNECT: [("OnVariableChange.outputs:changed", "Counter.inputs:execIn")],
},
)
graph_context = graph.get_default_graph_context()
# Create a new float variable on each graph, and set the OnVariableChange nodes to track this single variable.
var_name = "myVar" + str(test_instance_id)
my_var = og.Controller.create_variable(graph, var_name, "FLOAT")
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
og.Controller.set(og.Controller.attribute("inputs:variableName", on_variable_change_node), var_name)
# Check that the execution output is still set to zero.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:changed", on_variable_change_node)), self.D)
# Change the variable's value, check that the execution reads true now.
my_var.set(graph_context, 5.2)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:changed", on_variable_change_node)), self.E)
# Evaluate a second time to check that the execution gets set back to zero.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:changed", on_variable_change_node)), self.D)
# Make sure that the output execution remains off if we try setting the variable
# to the same previous value.
my_var.set(graph_context, 5.2)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:changed", on_variable_change_node)), self.D)
        # Check that setting the inputs:variableName to a nonexistent variable results in the
        # output execution pin remaining zero.
og.Controller.set(og.Controller.attribute("inputs:variableName", on_variable_change_node), "nonexistant_name")
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:changed", on_variable_change_node)), self.D)
        # Set the onlyPlayback flag to true and run a similar set of tests to ensure that the
        # node still works with this setting. Note that a Counter node is used here to detect
        # the variable change because checking outputs:changed directly is trickier (the async
        # updates while the timeline is running make the timing hard to control).
og.Controller.set(og.Controller.attribute("inputs:variableName", on_variable_change_node), var_name)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
og.Controller.set(og.Controller.attribute("inputs:onlyPlayback", on_variable_change_node), True)
timeline = ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id, omni.timeline.get_timeline_interface()
)
ThreadsafetyTestUtils.single_evaluation_first_test_instance(
test_instance_id, partial(timeline.set_target_framerate, timeline.get_time_codes_per_seconds())
)
ThreadsafetyTestUtils.single_evaluation_first_test_instance(test_instance_id, partial(timeline.play))
my_var.set(graph_context, 4.6)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 2)
my_var.set(graph_context, 4.6)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 2)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 2)
my_var.set(graph_context, 4.6)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 2)
og.Controller.set(og.Controller.attribute("inputs:variableName", on_variable_change_node), "nonexistant_name_2")
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 2)
ThreadsafetyTestUtils.single_evaluation_last_test_instance(test_instance_id, partial(timeline.stop))
# ----------------------------------------------------------------------
async def _run_test_variable_of_type(self, graph_id, var_type: og.Type, initial_value, changed_value):
"""Helper method to run a variable test with different variable types"""
graph_path = self.TEST_GRAPH_PATH + graph_id
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(graph, (_, counter_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnVariableChange", "omni.graph.action.OnVariableChange"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [
("OnVariableChange.inputs:onlyPlayback", False),
("OnVariableChange.inputs:variableName", "var"),
],
self.keys.CONNECT: [("OnVariableChange.outputs:changed", "Counter.inputs:execIn")],
self.keys.CREATE_VARIABLES: [
("var", var_type),
],
},
)
        # Test that nothing triggered on the initial run.
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 0)
var = graph.find_variable("var")
        # Set the initial value; expect a trigger.
var.set(graph.get_context(), initial_value)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 1)
ogts.verify_values(var.get(graph.get_context()), initial_value, "expected initial value to be set")
        # Change the value of the variable and make sure the OnVariableChange node triggers.
var.set(graph.get_context(), changed_value)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(og.Controller.attribute("outputs:count", counter_node)), 2)
        ogts.verify_values(var.get(graph.get_context()), changed_value, "expected changed value to be set")
# ----------------------------------------------------------------------
async def test_onvariablechanged_by_type(self):
"""Tests OnVariableChange for different types of variables, including strings and arrays"""
await self._run_test_variable_of_type("_int", og.Type(og.BaseDataType.INT), 1, 2)
await self._run_test_variable_of_type("_int_array", og.Type(og.BaseDataType.INT, 1, 1), [1, 2, 3], [3, 4, 5])
await self._run_test_variable_of_type(
"_int_array_size", og.Type(og.BaseDataType.INT, 1, 1), [1, 2, 3, 4, 5], [1, 2, 3]
)
await self._run_test_variable_of_type("_tuple", og.Type(og.BaseDataType.INT, 3, 0), (1, 2, 3), (3, 4, 5))
await self._run_test_variable_of_type(
"_string", og.Type(og.BaseDataType.UCHAR, 1, 1, og.AttributeRole.TEXT), "init", "changed"
)
await self._run_test_variable_of_type(
"_tuple_array", og.Type(og.BaseDataType.INT, 3, 1), [(1, 2, 3), (4, 5, 6)], [(3, 4, 5)]
)
# ----------------------------------------------------------------------
async def test_onvariablechange_node_instances(self):
"""Tests that OnVariableChange node works with instancing"""
controller = og.Controller()
keys = og.Controller.Keys
int_range = range(1, 100)
stage = omni.usd.get_context().get_stage()
for i in int_range:
prim_name = f"/World/Prim_{i}"
prim = stage.DefinePrim(prim_name)
OmniGraphSchemaTools.applyOmniGraphAPI(stage, prim_name, self.TEST_GRAPH_PATH)
prim.CreateAttribute("graph:variable:int_var", Sdf.ValueTypeNames.Int).Set(0)
prim.CreateAttribute("graph_output", Sdf.ValueTypeNames.Int).Set(i)
(_, _, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnVariableChange", "omni.graph.action.OnVariableChange"),
("GraphTarget", "omni.graph.nodes.GraphTarget"),
("ReadPrimAttr", "omni.graph.nodes.ReadPrimAttribute"),
("WritePrimAttr", "omni.graph.nodes.WritePrimAttribute"),
("Add", "omni.graph.nodes.Add"),
("ConstantInt", "omni.graph.nodes.ConstantInt"),
],
keys.CONNECT: [
("OnVariableChange.outputs:changed", "WritePrimAttr.inputs:execIn"),
("GraphTarget.outputs:primPath", "WritePrimAttr.inputs:primPath"),
("GraphTarget.outputs:primPath", "ReadPrimAttr.inputs:primPath"),
("ReadPrimAttr.outputs:value", "Add.inputs:a"),
("ConstantInt.inputs:value", "Add.inputs:b"),
("Add.outputs:sum", "WritePrimAttr.inputs:value"),
],
keys.CREATE_VARIABLES: [("int_var", og.Type(og.BaseDataType.INT))],
keys.SET_VALUES: [
("OnVariableChange.inputs:onlyPlayback", False),
("OnVariableChange.inputs:variableName", "int_var"),
("WritePrimAttr.inputs:usePath", True),
("WritePrimAttr.inputs:name", "graph_output"),
("ReadPrimAttr.inputs:usePath", True),
("ReadPrimAttr.inputs:name", "graph_output"),
("ConstantInt.inputs:value", 1),
],
},
)
await omni.kit.app.get_app().next_update_async()
# Setting value of int_var from 0 to 1.
for i in int_range:
prim_path = f"/World/Prim_{i}"
prev_val = stage.GetPrimAtPath(prim_path).GetAttribute("graph_output").Get()
stage.GetPrimAtPath(prim_path).GetAttribute("graph:variable:int_var").Set(1)
await omni.kit.app.get_app().next_update_async()
val = stage.GetPrimAtPath(prim_path).GetAttribute("graph_output").Get()
self.assertEqual(val, prev_val + 1, msg=f"{prim_path}")
# Setting value of int_var from 1 to 1.
for i in int_range:
prim_path = f"/World/Prim_{i}"
prev_val = stage.GetPrimAtPath(prim_path).GetAttribute("graph_output").Get()
stage.GetPrimAtPath(prim_path).GetAttribute("graph:variable:int_var").Set(1)
await omni.kit.app.get_app().next_update_async()
val = stage.GetPrimAtPath(prim_path).GetAttribute("graph_output").Get()
self.assertEqual(val, prev_val + 1, msg=f"{prim_path}")
# ----------------------------------------------------------------------
# The SendCustomEvent node is tested in tandem with the OnCustomEvent node above; it is also used
# extensively in other tests, so we skip adding another dedicated test here.
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_sequence_node(self, test_instance_id: int = 0):
"""Test Sequence node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (_, sequence_node, counter0_node, counter1_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Sequence", "omni.graph.action.Sequence"),
("Counter0", "omni.graph.action.Counter"),
("Counter1", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: [
("OnTick.outputs:tick", "Sequence.inputs:execIn"),
("Sequence.outputs:a", "Counter0.inputs:execIn"),
("Sequence.outputs:b", "Counter1.inputs:execIn"),
],
},
)
# Obtain necessary attributes.
in_exec_attr_0 = counter0_node.get_attribute("inputs:execIn")
out_cnt_attr_0 = counter0_node.get_attribute("outputs:count")
out_cnt_attr_1 = counter1_node.get_attribute("outputs:count")
out_a_attr = sequence_node.get_attribute("outputs:a")
out_b_attr = sequence_node.get_attribute("outputs:b")
        # Check that the Sequence node correctly executes through its outputs when input
        # execution is enabled via the OnTick node. This is done by checking whether each
        # counter has been incremented by 1, and whether the last output pin on the Sequence
        # node remains enabled (even though it is not connected downstream at this point).
        # This mirrors the Multisequence node test.
self.assertEqual(out_cnt_attr_0.get(), 0)
self.assertEqual(out_cnt_attr_1.get(), 0)
self.assertEqual(out_a_attr.get(), self.D)
self.assertEqual(out_b_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 1)
self.assertEqual(out_cnt_attr_1.get(), 1)
self.assertEqual(out_a_attr.get(), self.D)
self.assertEqual(out_b_attr.get(), self.E)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 2)
self.assertEqual(out_cnt_attr_1.get(), 2)
self.assertEqual(out_a_attr.get(), self.D)
self.assertEqual(out_b_attr.get(), self.E)
        # Also connect output pin b to the Counter0 node. Both pins a and b now share the
        # same terminal input attribute, so both must have the same value. Since b is the
        # last output and must be set to 1 (due to logic inside this node's compute method),
        # output a must also take on this value.
og.Controller.connect(out_b_attr, in_exec_attr_0)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 4)
self.assertEqual(out_cnt_attr_1.get(), 3)
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.E)
# ----------------------------------------------------------------------
async def test_setprimactive_node(self):
await self._setprimactive_node(False)
# ----------------------------------------------------------------------
async def test_setprimactive_node_with_target(self):
await self._setprimactive_node(True)
# ----------------------------------------------------------------------
async def _setprimactive_node(self, use_target_inputs):
"""
Test SetPrimActive node. If use_target_inputs is true, will use the prim target inputs rather than prim path
"""
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
(_, (_, set_prim_active_node), _, _,) = og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("SetPrimActive", "omni.graph.action.SetPrimActive"),
],
self.keys.CREATE_PRIMS: [
("/TestPrim", {}),
],
self.keys.CONNECT: [("OnTick.outputs:tick", "SetPrimActive.inputs:execIn")],
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("SetPrimActive.inputs:prim", "/TestPrim"),
("SetPrimActive.inputs:active", True),
],
},
)
await og.Controller.evaluate()
if use_target_inputs:
rel = stage.GetRelationshipAtPath(f"{self.TEST_GRAPH_PATH}/SetPrimActive.inputs:primTarget")
omni.kit.commands.execute("AddRelationshipTarget", relationship=rel, target="/TestPrim")
await og.Controller.evaluate()
# Check that the prim we're pointing to remains active when we trigger
# graph evaluation.
prim = stage.GetPrimAtPath("/TestPrim")
self.assertTrue(prim.IsActive())
await og.Controller.evaluate()
self.assertTrue(prim.IsActive())
# Now set the prim to be inactive.
og.Controller.set(og.Controller.attribute("inputs:active", set_prim_active_node), False)
await og.Controller.evaluate()
self.assertFalse(prim.IsActive())
await og.Controller.evaluate()
self.assertFalse(prim.IsActive())
# Again set to be active, make sure that the prim gets reactivated.
og.Controller.set(og.Controller.attribute("inputs:active", set_prim_active_node), True)
await og.Controller.evaluate()
self.assertTrue(prim.IsActive())
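        # For reference, the node's effect is assumed to match authoring
        # prim.SetActive(...) directly on the USD prim; the node simply defers
        # that call until its execIn pin fires during graph evaluation.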
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_switchtoken_node(self, test_instance_id: int = 0):
"""Test SwitchToken node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (_, switch_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Switch", "omni.graph.action.SwitchToken"),
],
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
self.keys.CONNECT: [
("OnTick.outputs:tick", "Switch.inputs:execIn"),
],
},
)
# Test that an execution connection is correctly triggering the downstream nodes once per evaluation.
def add_input(index: int):
og.Controller.create_attribute(
switch_node,
f"inputs:branch{index:02}",
"token",
og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT,
)
og.Controller.create_attribute(
switch_node,
f"outputs:output{index:02}",
"execution",
og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT,
)
og.Controller.set(og.Controller.attribute(f"inputs:branch{index:02}", switch_node), f"{index:02}")
for index in range(5):
add_input(index)
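        # At this point the Switch node is assumed to expose five dynamic token
        # inputs (inputs:branch00 .. inputs:branch04) and five matching execution
        # outputs (outputs:output00 .. outputs:output04); inputs:value selects
        # which branch, if any, fires on the next evaluation.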
for index in range(5):
og.Controller.set(og.Controller.attribute("inputs:value", switch_node), f"{index:02}")
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
for index2 in range(5):
expected_val = (
og.ExecutionAttributeState.DISABLED if index2 != index else og.ExecutionAttributeState.ENABLED
)
self.assertEqual(
og.Controller.get(og.Controller.attribute(f"outputs:output{index2:02}", switch_node)),
expected_val,
)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_syncgate_node(self, test_instance_id: int = 0):
"""Test SyncGate node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(
_,
(on_impulse0_node, on_impulse1_node, on_impulse2_node, on_impulse3_node, sync_gate_node, counter_node),
_,
_,
) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnImpulse0", "omni.graph.action.OnImpulseEvent"),
("OnImpulse1", "omni.graph.action.OnImpulseEvent"),
("OnImpulse2", "omni.graph.action.OnImpulseEvent"),
("OnImpulse3", "omni.graph.action.OnImpulseEvent"),
("SyncGate", "omni.graph.action.SyncGate"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.CONNECT: [
("OnImpulse0.outputs:execOut", "SyncGate.inputs:execIn"),
("OnImpulse1.outputs:execOut", "SyncGate.inputs:execIn"),
("OnImpulse2.outputs:execOut", "SyncGate.inputs:execIn"),
("OnImpulse3.outputs:execOut", "SyncGate.inputs:execIn"),
("SyncGate.outputs:execOut", "Counter.inputs:execIn"),
],
self.keys.SET_VALUES: [
("OnImpulse0.inputs:onlyPlayback", False),
("OnImpulse1.inputs:onlyPlayback", False),
("OnImpulse2.inputs:onlyPlayback", False),
("OnImpulse3.inputs:onlyPlayback", False),
("SyncGate.inputs:syncValue", 5),
],
},
)
# Obtain necessary attributes.
state_enable_impulse_attr_0 = on_impulse0_node.get_attribute("state:enableImpulse")
state_enable_impulse_attr_1 = on_impulse1_node.get_attribute("state:enableImpulse")
state_enable_impulse_attr_2 = on_impulse2_node.get_attribute("state:enableImpulse")
state_enable_impulse_attr_3 = on_impulse3_node.get_attribute("state:enableImpulse")
in_syncvalue_attr = sync_gate_node.get_attribute("inputs:syncValue")
out_syncvalue_attr = sync_gate_node.get_attribute("outputs:syncValue")
out_cnt_attr = counter_node.get_attribute("outputs:count")
# First check that the SyncGate node only gets unblocked when it detects 4+ input
# enabled executions, at which point the Counter node will begin to be incremented.
# Also check that after the 1st graph evaluation the SyncGate's output syncValue gets
# set to its input syncValue.
state_enable_impulse_attr_0.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 5)
self.assertEqual(out_cnt_attr.get(), 0)
state_enable_impulse_attr_1.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 5)
self.assertEqual(out_cnt_attr.get(), 0)
state_enable_impulse_attr_2.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 5)
self.assertEqual(out_cnt_attr.get(), 0)
state_enable_impulse_attr_3.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 5)
self.assertEqual(out_cnt_attr.get(), 1)
state_enable_impulse_attr_0.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 5)
self.assertEqual(out_cnt_attr.get(), 2)
# Now reset the SyncGate node's syncValue. Check that we once again need 4+ input executions
# with the "enabled" status flowing into the SyncGate in order to unlock it. Also note
# that we don't technically need each separate OnImpulse node to trigger to unlock the gate;
# we could have a single OnImpulse node (per graph) send 4 separate impulses to open each gate
# in each graph, since that would reach the threshold number of accumulated execIn states for
# this setup (equal to 4 since we have 4 nodes connected to the SyncGate's inputs:execIn pin).
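        # Minimal mental model of the gate (our reading, not an official spec):
        # it counts enabled executions seen on inputs:execIn since the last
        # syncValue change and opens only once that count reaches the number of
        # upstream connections -- 4 here. A hypothetical introspection check
        # (assuming og.Attribute.get_upstream_connection_count() is available):
        # self.assertEqual(
        #     sync_gate_node.get_attribute("inputs:execIn").get_upstream_connection_count(), 4
        # )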
in_syncvalue_attr.set(9)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
for i in range(0, 5):
state_enable_impulse_attr_0.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 9)
if i < 3:
self.assertEqual(out_cnt_attr.get(), 2)
else:
self.assertEqual(out_cnt_attr.get(), 2 + (i - 2))
# Now reset the syncValue again per node. This time only send 2 input impulses, then reset the syncValue
# immediately after. Check that we'll again need to send 4 input impulses to open the corresponding gate
# (which was essentially "relocked" with the syncValue change).
in_syncvalue_attr.set(21)
state_enable_impulse_attr_0.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 21)
self.assertEqual(out_cnt_attr.get(), 4)
state_enable_impulse_attr_2.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 21)
self.assertEqual(out_cnt_attr.get(), 4)
in_syncvalue_attr.set(25)
for i in range(0, 5):
state_enable_impulse_attr_2.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_syncvalue_attr.get(), 25)
if i < 3:
self.assertEqual(out_cnt_attr.get(), 4)
else:
self.assertEqual(out_cnt_attr.get(), 4 + (i - 2))
# ----------------------------------------------------------------------
async def test_countdown_node(self):
"""Test Countdown node"""
duration = 5
period = 3
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
(_, (_, _, counter0_node, counter1_node), _, _,) = og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Countdown", "omni.graph.action.Countdown"),
("Counter0", "omni.graph.action.Counter"),
("Counter1", "omni.graph.action.Counter"),
],
self.keys.CONNECT: [
("OnTick.outputs:tick", "Countdown.inputs:execIn"),
("Countdown.outputs:finished", "Counter0.inputs:execIn"),
("Countdown.outputs:tick", "Counter1.inputs:execIn"),
],
                # Set the Countdown inputs so that the Counter0 node is incremented only
                # when the countdown of `duration` update ticks finishes, and so that
                # the Counter1 node is incremented every `period` ticks while counting down.
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Countdown.inputs:duration", duration),
("Countdown.inputs:period", period),
],
},
)
# Obtain necessary attributes.
out_cnt_attr_0 = counter0_node.get_attribute("outputs:count")
out_cnt_attr_1 = counter1_node.get_attribute("outputs:count")
# Test against a predetermined set of values to make sure that
# this node doesn't break in the future.
cnt0 = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]
cnt1 = [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3]
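        # Sanity sketch re-deriving the tables above (an assumption about the
        # node's schedule, not its documented contract): each cycle appears to
        # span duration + 2 evaluations -- one to enter the latent state,
        # `duration` ticking evaluations, and one that fires outputs:finished.
        # outputs:tick fires on every `period`-th ticking evaluation.
        expected0, expected1 = [], []
        c0 = c1 = 0
        for i in range(20):
            phase = i % (duration + 2)
            if phase == duration + 1:  # evaluation that fires outputs:finished
                c0 += 1
            if 1 <= phase <= duration and phase % period == 0:  # periodic tick
                c1 += 1
            expected0.append(c0)
            expected1.append(c1)
        self.assertEqual(cnt0, expected0)
        self.assertEqual(cnt1, expected1)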
for i in range(0, 20):
await og.Controller.evaluate()
self.assertEqual(og.Controller.get(out_cnt_attr_0), cnt0[i])
self.assertEqual(og.Controller.get(out_cnt_attr_1), cnt1[i])
| 52,966 | Python | 54.462827 | 127 | 0.598629 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_actiongraph.py | """Basic tests of the action graph"""
import omni.client
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.test
import omni.usd
from pxr import Gf, Sdf
# ======================================================================
class TestActionGraphNodes(ogts.OmniGraphTestCase):
"""Tests action graph node functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ----------------------------------------------------------------------
async def test_basic(self):
"""exercise a basic network of execution nodes"""
controller = og.Controller()
keys = og.Controller.Keys
# Test that an execution connection is correctly triggering the downstream node once per evaluation
(graph, (_, flip_flop_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [("OnTick", "omni.graph.action.OnTick"), ("FlipFlop", "omni.graph.action.FlipFlop")],
keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
keys.CONNECT: ("OnTick.outputs:tick", "FlipFlop.inputs:execIn"),
},
)
outa = controller.attribute("outputs:a", flip_flop_node)
outb = controller.attribute("outputs:b", flip_flop_node)
outisa = controller.attribute("outputs:isA", flip_flop_node)
# first eval, 'a'
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), og.ExecutionAttributeState.ENABLED)
self.assertEqual(og.Controller.get(outb), og.ExecutionAttributeState.DISABLED)
self.assertTrue(og.Controller.get(outisa))
# second eval 'b'
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), og.ExecutionAttributeState.DISABLED)
self.assertEqual(og.Controller.get(outb), og.ExecutionAttributeState.ENABLED)
self.assertFalse(og.Controller.get(outisa))
# third eval 'a'
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), og.ExecutionAttributeState.ENABLED)
self.assertEqual(og.Controller.get(outb), og.ExecutionAttributeState.DISABLED)
self.assertTrue(og.Controller.get(outisa))
# Test that non-usd-backed node has execution attribute type correct
_, on_tick_no_usd = og.cmds.CreateNode(
graph=graph,
node_path=f"{self.TEST_GRAPH_PATH}/OnTickNoUSD",
node_type="omni.graph.action.OnTick",
create_usd=False,
)
og.cmds.CreateNode(
graph=graph,
node_path=f"{self.TEST_GRAPH_PATH}/FlipFlopNoUSD",
node_type="omni.graph.action.FlipFlop",
create_usd=False,
)
controller.attribute("inputs:onlyPlayback", on_tick_no_usd, graph).set(False)
og.cmds.ConnectAttrs(
src_attr=f"{self.TEST_GRAPH_PATH}/OnTickNoUSD.outputs:tick",
dest_attr=f"{self.TEST_GRAPH_PATH}/FlipFlopNoUSD.inputs:execIn",
modify_usd=False,
)
outa = controller.attribute("outputs:a", f"{self.TEST_GRAPH_PATH}/FlipFlopNoUSD")
outb = controller.attribute("outputs:b", f"{self.TEST_GRAPH_PATH}/FlipFlopNoUSD")
outisa = controller.attribute("outputs:isA", f"{self.TEST_GRAPH_PATH}/FlipFlopNoUSD")
# first eval, 'a'
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), og.ExecutionAttributeState.ENABLED)
self.assertEqual(og.Controller.get(outb), og.ExecutionAttributeState.DISABLED)
self.assertTrue(og.Controller.get(outisa))
# second eval 'b'
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), og.ExecutionAttributeState.DISABLED)
self.assertEqual(og.Controller.get(outb), og.ExecutionAttributeState.ENABLED)
self.assertFalse(og.Controller.get(outisa))
# third eval 'a'
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), og.ExecutionAttributeState.ENABLED)
self.assertEqual(og.Controller.get(outb), og.ExecutionAttributeState.DISABLED)
self.assertTrue(og.Controller.get(outisa))
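        # FlipFlop mental model (a sketch of our reading, not the node's spec):
        # each enabled evaluation toggles an internal boolean, outputs:isA
        # mirrors it, and exactly one of outputs:a / outputs:b is enabled per
        # evaluation -- hence the a, b, a pattern over the three evaluations:
        self.assertEqual([n % 2 == 0 for n in range(3)], [True, False, True])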
# ----------------------------------------------------------------------
async def test_notice(self):
"""Tests the OnObjectChange node"""
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
controller = og.Controller()
keys = og.Controller.Keys
cube = ogts.create_cube(stage, "Cube", (0.6, 0.4, 0.0))
attr_position = cube.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Float3, False)
attr_rotation = cube.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Float3, False)
(graph, (on_object_change_node, flip_flop_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnObjectChange", "omni.graph.action.OnObjectChange"),
("FlipFlop", "omni.graph.action.FlipFlop"),
],
keys.SET_VALUES: [
("OnObjectChange.inputs:onlyPlayback", False),
("OnObjectChange.inputs:path", attr_position.GetPath().pathString),
],
keys.CONNECT: ("OnObjectChange.outputs:changed", "FlipFlop.inputs:execIn"),
},
)
outa = controller.attribute("outputs:a", flip_flop_node)
# check current flipflop state, and that it isn't changing until we move the cube
await controller.evaluate(graph)
ff_a_state_1 = og.Controller.get(outa)
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), ff_a_state_1)
# Try changing a different attr - should not trigger
attr_rotation.Set(Gf.Vec3f(180.0, 0.0, 0.0))
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(outa), ff_a_state_1)
# Now change the translate - should trigger
attr_position.Set(Gf.Vec3f(1.0, 0.0, 0.0))
await controller.evaluate(graph)
ff_a_state_2 = og.Controller.get(outa)
self.assertEqual(ff_a_state_2, 1 if not ff_a_state_1 else 0)
property_path = og.Controller.get(controller.attribute("outputs:propertyName", on_object_change_node))
self.assertEqual(property_path, attr_position.GetName())
# Look at prim itself
controller.edit(graph, {keys.SET_VALUES: ("OnObjectChange.inputs:path", cube.GetPath().pathString)})
await controller.evaluate(graph)
# Now change the rotate - should trigger
attr_rotation.Set(Gf.Vec3f(245.0, 0.0, 0.0))
await controller.evaluate(graph)
ff_a_state_1 = og.Controller.get(outa)
self.assertEqual(ff_a_state_1, 1 if not ff_a_state_2 else 0)
property_path = og.Controller.get(controller.attribute("outputs:propertyName", on_object_change_node))
self.assertEqual(property_path, attr_rotation.GetName())
# Now use a prim rel instead of the path input
inputs_prim = stage.GetPrimAtPath(on_object_change_node.get_prim_path()).GetRelationship("inputs:prim")
inputs_prim.AddTarget(cube.GetPath())
controller.edit(graph, {keys.SET_VALUES: ("OnObjectChange.inputs:path", "")})
# compute once to set up the inputs
await controller.evaluate(graph)
# change rotate - should trigger
attr_rotation.Set(Gf.Vec3f(0.0, 0.0, 0.0))
await controller.evaluate(graph)
ff_a_state_2 = og.Controller.get(outa)
self.assertEqual(ff_a_state_2, 1 if not ff_a_state_1 else 0)
property_path = og.Controller.get(controller.attribute("outputs:propertyName", on_object_change_node))
self.assertEqual(property_path, attr_rotation.GetName())
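        # OnObjectChange is assumed to rely on USD change notification (Tf
        # notices), which is why each authored change only becomes observable
        # on the next graph evaluation rather than synchronously with Set().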
# ----------------------------------------------------------------------
async def test_onplaybacktick(self):
"""Test OnPlaybackTick and OnTick node"""
controller = og.Controller()
keys = og.Controller.Keys
app = omni.kit.app.get_app()
timeline = omni.timeline.get_timeline_interface()
timeline.set_start_time(1.0)
timeline.set_end_time(10.0)
fps = 24.0
timeline.set_time_codes_per_second(fps)
(graph, (on_p_tick_node, on_tick_node, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnPTick", "omni.graph.action.OnPlaybackTick"),
("OnTick", "omni.graph.action.OnTick"),
("FlipFlop", "omni.graph.action.FlipFlop"),
],
keys.CONNECT: [
("OnPTick.outputs:tick", "FlipFlop.inputs:execIn"),
("OnTick.outputs:tick", "FlipFlop.inputs:execIn"),
],
keys.SET_VALUES: ("OnTick.inputs:onlyPlayback", True),
},
)
await controller.evaluate(graph)
tick_time = controller.attribute("outputs:time", on_p_tick_node)
tick_frame = controller.attribute("outputs:frame", on_p_tick_node)
ontick_time = controller.attribute("outputs:time", on_tick_node)
ontick_frame = controller.attribute("outputs:frame", on_tick_node)
# Check that the OnPlaybackTick node doesn't trigger when playback is not active
self.assertEqual(og.Controller.get(tick_time), 0)
self.assertEqual(og.Controller.get(tick_frame), 0)
self.assertEqual(og.Controller.get(ontick_time), 0)
self.assertEqual(og.Controller.get(ontick_frame), 0)
# Check that the OnPlaybackTick node does trigger when playback is active, and the values are correct
timeline.play()
await app.next_update_async()
await controller.evaluate(graph)
t0 = og.Controller.get(tick_time)
f0 = og.Controller.get(tick_frame)
self.assertAlmostEqual(t0, timeline.get_current_time(), places=5)
self.assertAlmostEqual(f0, t0 * fps, places=5)
t0 = og.Controller.get(ontick_time)
f0 = og.Controller.get(ontick_frame)
self.assertAlmostEqual(t0, timeline.get_current_time(), places=5)
self.assertAlmostEqual(f0, t0 * fps, places=5)
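        # Worked example of the frame/time relationship asserted above, using
        # only the fps value set on the timeline: a playback time of 0.5 s
        # corresponds to frame time * timeCodesPerSecond = 12.0.
        self.assertAlmostEqual(0.5 * fps, 12.0, places=5)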
# ----------------------------------------------------------------------
async def test_non_action_subgraph(self):
"""Tests subgraphs in action graph"""
controller = og.Controller()
keys = og.Controller.Keys
# Test that a push subgraph is ticked by the top level Action graph
subgraph_path = self.TEST_GRAPH_PATH + "/push_sub"
graph = omni.graph.core.get_current_graph()
graph.create_subgraph(subgraph_path, evaluator="push")
subgraph = graph.get_subgraph(subgraph_path)
self.assertTrue(subgraph.is_valid())
sub_ff_node = self.TEST_GRAPH_PATH + "/push_sub/FlipFlop"
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FlipFlop", "omni.graph.action.FlipFlop"),
],
keys.CONNECT: ("OnTick.outputs:tick", "FlipFlop.inputs:execIn"),
},
)
await og.Controller.evaluate()
controller.edit(subgraph, {keys.CREATE_NODES: (sub_ff_node, "omni.graph.action.FlipFlop")})
sub_flipflop_node = subgraph.get_node(sub_ff_node)
self.assertTrue(sub_flipflop_node.is_valid())
ffstate0 = og.Controller.get(controller.attribute("outputs:isA", sub_ff_node))
await og.Controller.evaluate()
ffstate1 = og.Controller.get(controller.attribute("outputs:isA", sub_ff_node))
self.assertNotEqual(ffstate0, ffstate1)
# ----------------------------------------------------------------------
async def test_custom_events(self):
"""Test SendCustomEvent and OnCustomEvent node"""
controller = og.Controller()
keys = og.Controller.Keys
await omni.kit.app.get_app().next_update_async()
(_, (_, _, event1_node, counter1_node, event2_node, counter2_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Send", "omni.graph.action.SendCustomEvent"),
("OnCustomEvent1", "omni.graph.action.OnCustomEvent"),
("Counter1", "omni.graph.action.Counter"),
("OnCustomEvent2", "omni.graph.action.OnCustomEvent"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "Send.inputs:execIn"),
("OnCustomEvent1.outputs:execOut", "Counter1.inputs:execIn"),
("OnCustomEvent2.outputs:execOut", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [
("OnCustomEvent1.inputs:onlyPlayback", False),
("OnCustomEvent2.inputs:onlyPlayback", False),
("OnImpulse.inputs:onlyPlayback", False),
("Send.inputs:eventName", "foo"),
("Send.inputs:path", "Test Path"),
("OnCustomEvent1.inputs:eventName", "foo"),
("OnCustomEvent2.inputs:eventName", "foo"),
],
},
)
counter1_controller = og.Controller(og.Controller.attribute("outputs:count", counter1_node))
counter2_controller = og.Controller(og.Controller.attribute("outputs:count", counter2_node))
event1_controller = og.Controller(og.Controller.attribute("outputs:path", event1_node))
event2_controller = og.Controller(og.Controller.attribute("outputs:path", event2_node))
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 0)
# trigger graph once, this will queue up the event for the next evaluation
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
# Note that if this is a push subscription, the receivers will run this frame instead of next
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 0)
# This evaluation should trigger the receivers
await omni.kit.app.get_app().next_update_async()
# Verify that events were received
self.assertEqual(counter1_controller.get(), 1)
self.assertEqual(event1_controller.get(), "Test Path")
self.assertEqual(counter2_controller.get(), 1)
self.assertEqual(event2_controller.get(), "Test Path")
# Verify the contents of the associated bundle
# FIXME: Authored bundle is always empty?
# bundle_contents = og.BundleContents(graph.get_default_graph_context(), event1_node, "outputs:bundle", True)
# self.assertEqual(1, bundle_contents.size)
# Modify the event name one receiver and sender and ensure it still works
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.SET_VALUES: [("Send.inputs:eventName", "bar"), ("OnImpulse.state:enableImpulse", True)],
},
)
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
# We changed the sender event name, so counter should _not_ have triggered again
self.assertEqual(counter1_controller.get(), 1)
# Change the receiver name to match
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnCustomEvent1.inputs:eventName", "bar")})
await omni.kit.app.get_app().next_update_async()
# trigger send again and verify we get it (1 frame lag for pop)
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 2)
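        # Timing model assumed by the double waits above: SendCustomEvent
        # pushes onto the event stream during update N, OnCustomEvent pops the
        # queued event on update N + 1, so receivers observe the send one kit
        # update later.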
# ----------------------------------------------------------------------
async def test_request_driven_node(self):
"""Test that RequestDriven nodes are computed as expected"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Counter", "omni.graph.action.Counter"),
],
keys.SET_VALUES: [("OnImpulse.inputs:onlyPlayback", False)],
keys.CONNECT: ("OnImpulse.outputs:execOut", "Counter.inputs:execIn"),
},
)
# After several updates, there should have been no compute calls
await controller.evaluate(graph)
await controller.evaluate(graph)
await controller.evaluate(graph)
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
self.assertEqual(counter_controller.get(), 0)
# change OnImpulse state attrib. The node should now request compute
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 1)
# more updates should not result in more computes
await controller.evaluate(graph)
await controller.evaluate(graph)
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 1)
# ----------------------------------------------------------------------
async def test_stage_events(self):
"""Test OnStageEvent"""
controller = og.Controller()
keys = og.Controller.Keys
(_, (_, _, _, counter_node, counter2_node, counter3_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnStageEvent", "omni.graph.action.OnStageEvent"),
("OnStageEvent2", "omni.graph.action.OnStageEvent"),
("OnStageEvent3", "omni.graph.action.OnStageEvent"),
("Counter", "omni.graph.action.Counter"),
("Counter2", "omni.graph.action.Counter"),
("Counter3", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnStageEvent.outputs:execOut", "Counter.inputs:execIn"),
("OnStageEvent2.outputs:execOut", "Counter2.inputs:execIn"),
("OnStageEvent3.outputs:execOut", "Counter3.inputs:execIn"),
],
keys.SET_VALUES: [
("OnStageEvent.inputs:eventName", "Selection Changed"),
("OnStageEvent.inputs:onlyPlayback", False),
("OnStageEvent2.inputs:eventName", "Animation Stop Play"),
("OnStageEvent2.inputs:onlyPlayback", True),
("OnStageEvent3.inputs:eventName", "Animation Start Play"),
("OnStageEvent3.inputs:onlyPlayback", True),
],
},
)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_node)), 0)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 0)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter3_node)), 0)
selection = omni.usd.get_context().get_selection()
selection.set_selected_prim_paths([self.TEST_GRAPH_PATH + "/OnStageEvent"], False)
# 1 frame delay on the pop, 1 frame delay on the compute
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_node)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 0)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter3_node)), 0)
        # Verify that start/stop events work when onlyPlayback is true
timeline = omni.timeline.get_timeline_interface()
timeline.set_start_time(1.0)
timeline.set_end_time(10.0)
timeline.play()
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 0)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter3_node)), 1)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 0)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter3_node)), 1)
timeline.stop()
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter3_node)), 1)
await controller.evaluate()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter3_node)), 1)
# -----------------------------------------------------------------------
async def test_add_prim_relationship(self):
"""Test AddPrimRelationship"""
controller = og.Controller()
        # Check that we can add a relationship to a prim and read that relationship back
#
# +---------+ +---------------------+
# | ONTICK +-->| AddPrimRelationship +
# +---------+ +---------------------+
#
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
keys = og.Controller.Keys
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("AddRel", "omni.graph.action.AddPrimRelationship"),
],
keys.CREATE_PRIMS: [
("/Test", {}),
("/Target", {}),
],
keys.CONNECT: [("OnTick.outputs:tick", "AddRel.inputs:execIn")],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("AddRel.inputs:path", "/Test"),
("AddRel.inputs:name", "rel"),
("AddRel.inputs:target", "/Target"),
],
},
)
prim = stage.GetPrimAtPath("/Test")
await controller.evaluate()
rel = prim.GetRelationship("rel")
targets = rel.GetTargets()
self.assertEqual(len(targets), 1)
        self.assertEqual(str(targets[0]), "/Target")
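        # For reference, the node's effect is assumed to be equivalent to this
        # direct USD authoring (left commented out so the test only exercises
        # the node itself):
        # rel = stage.GetPrimAtPath("/Test").CreateRelationship("rel")
        # rel.AddTarget("/Target")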
# ----------------------------------------------------------------------
async def test_switch(self):
"""Test the Switch nodes"""
controller = og.Controller()
keys = og.Controller.Keys
# Test that an execution connection is correctly triggering the downstream node once per evaluation
(graph, (_, switch_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Switch", "omni.graph.action.SwitchToken"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Switch.inputs:execIn"),
],
},
)
def add_input(index: int):
controller.create_attribute(
switch_node,
f"inputs:branch{index:02}",
"token",
og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT,
)
controller.create_attribute(
switch_node,
f"outputs:output{index:02}",
"execution",
og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT,
)
og.Controller.set(controller.attribute(f"inputs:branch{index:02}", switch_node), f"{index:02}")
for index in range(5):
add_input(index)
for index in range(5):
og.Controller.set(controller.attribute("inputs:value", switch_node), f"{index:02}")
await controller.evaluate(graph)
for index2 in range(5):
expected_val = (
og.ExecutionAttributeState.DISABLED if index2 != index else og.ExecutionAttributeState.ENABLED
)
self.assertEqual(
og.Controller.get(controller.attribute(f"outputs:output{index2:02}", switch_node)), expected_val
)
| 25,795 | Python | 45.987249 | 120 | 0.584028 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_compounds.py | """Action Graph with Compounds"""
import carb.events
import omni.graph.core as og
import omni.graph.core._unstable as ogu
import omni.graph.core.tests as ogts
import omni.kit.app
# ======================================================================
class TestActionGraphCompounds(ogts.OmniGraphTestCase):
"""Tests action graph evaluator functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ------------------------------------------------------------------------
async def test_create_action_subgraph_with_command(self):
"""Validates the create compound subgraph command in an action graph"""
# "Add" will only compute if the subgraph is executed as a push graph OR
# is flattened into the parent graph
# ┌───────────────┐
# │Subgraph │
# ┌─────┐ │ ┌─────┐ │
# │Const├───►────► Add ├───►├────┐
# └─────┘ │ └──▲──┘ │ │
# │ │ │ │
# │ ┌─────┴┐ │ │
# │ │Const2│ │ │
# │ └──────┘ │ │
# │ │ │
# └───────────────┘ │
# ┌───────┐ ┌▼────────────┐
# │OnTick ├──────────────────────►WriteVariable│
# └───────┘ └─────────────┘
controller = og.Controller()
keys = controller.Keys
(graph, (_, _, const_2, add, _), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Const", "omni.graph.nodes.ConstantInt"),
("Const2", "omni.graph.nodes.ConstantInt"),
("Add", "omni.graph.nodes.Add"),
("WriteVariable", "omni.graph.core.WriteVariable"),
],
keys.CREATE_VARIABLES: [
("int_var", og.Type(og.BaseDataType.INT), 0),
],
keys.CONNECT: [
("Const.inputs:value", "Add.inputs:a"),
("Const2.inputs:value", "Add.inputs:b"),
("Add.outputs:sum", "WriteVariable.inputs:value"),
("OnTick.outputs:tick", "WriteVariable.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("WriteVariable.inputs:variableName", "int_var"),
],
},
)
await og.Controller.evaluate(graph)
        self.assertEqual(graph.get_variables()[0].get(graph.get_default_graph_context()), 0)
controller.edit(
self.TEST_GRAPH_PATH, {keys.SET_VALUES: [("Const.inputs:value", 22), ("Const2.inputs:value", 20)]}
)
ogu.cmds.ReplaceWithCompoundSubgraph(
nodes=[add.get_prim_path(), const_2.get_prim_path()], compound_name="Subgraph"
)
await og.Controller.evaluate(graph)
        self.assertEqual(graph.get_variables()[0].get(graph.get_default_graph_context()), 42)
# ------------------------------------------------------------------------
async def test_event_node_in_subgraph(self):
"""
Test that a compute-on-request node inside a subgraph works
"""
controller = og.Controller(update_usd=True)
keys = controller.Keys
(graph, (compound_node,), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
(
"CompoundOuter",
{
keys.CREATE_NODES: [
(
"CompoundInner",
{
keys.CREATE_NODES: [
("OnEvent", "omni.graph.action.OnMessageBusEvent"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [("OnEvent.outputs:execOut", "Counter.inputs:execIn")],
keys.SET_VALUES: [
("OnEvent.inputs:onlyPlayback", False),
("OnEvent.inputs:eventName", "testEvent"),
],
},
)
]
},
),
],
},
)
# One compute for the first-time subscribe.
await omni.kit.app.get_app().next_update_async()
msg = carb.events.type_from_string("testEvent")
counter_attr = og.Controller.attribute(
f"{compound_node.get_prim_path()}/Subgraph/CompoundInner/Subgraph/Counter.outputs:count"
)
await og.Controller.evaluate(graph)
self.assertEqual(counter_attr.get(), 0)
omni.kit.app.get_app().get_message_bus_event_stream().push(msg)
# Wait for one kit update to allow the event-push mechanism to trigger the node callback.
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter_attr.get(), 1)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter_attr.get(), 1)
omni.kit.app.get_app().get_message_bus_event_stream().push(msg)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter_attr.get(), 2)
| 5,943 | Python | 43.358209 | 110 | 0.434629 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_evaluation_02.py | """Action Graph Evaluation Tests, Part 2"""
from enum import Enum, auto
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.test
import omni.usd
from omni.graph.action import get_interface
from pxr import Gf
# ======================================================================
class TestActionGraphEvaluation(ogts.OmniGraphTestCase):
"""Tests action graph evaluator functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ----------------------------------------------------------------------
async def test_latent_fan_out(self):
"""Test latent nodes when part of parallel evaluation"""
# +------------+
# +---->|TickCounterA|
# | +------------+
# |
# +--------++ +----------+
# +-> TickA +--->|FinishedA |
# | +---------+ +----------+
# +---------+ +-----------+ |
# |OnImpulse+-->|TickCounter+-+
# +---------+ +-----------+ |
# | +---------+ +----------+
# +>| TickB +--->|FinishedB |
# +--------++ +----------+
# |
# | +------------+
# +---->|TickCounterB|
# +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.Countdown"),
("TickB", "omni.graph.action.Countdown"),
("TickCounter", "omni.graph.action.Counter"),
("TickCounterA", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickA.inputs:execIn"),
("TickCounter.outputs:execOut", "TickB.inputs:execIn"),
("TickA.outputs:tick", "TickCounterA.inputs:execIn"),
("TickB.outputs:tick", "TickCounterB.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, _, _, tick_counter, tick_counter_a, tick_counter_b, finish_counter_a, finish_counter_b) = nodes
def check_counts(t_def, t_a, t_b, f_a, f_b):
for node, expected in (
(tick_counter, t_def),
(tick_counter_a, t_a),
(tick_counter_b, t_b),
(finish_counter_a, f_a),
(finish_counter_b, f_b),
):
count = og.Controller.get(controller.attribute("outputs:count", node))
self.assertEqual(count, expected, node.get_prim_path())
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 0)
await controller.evaluate(graph)
check_counts(1, 1, 1, 0, 0)
await controller.evaluate(graph)
check_counts(1, 2, 2, 0, 0)
await controller.evaluate(graph)
check_counts(1, 2, 2, 1, 1)
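        # Schedule implied by the assertions above (our reading of the latent
        # protocol): evaluation 1 fires the impulse and both Countdown nodes
        # enter their latent state; evaluations 2 and 3 are the two latent
        # ticks (duration=2) fanning out to both branches in parallel; and
        # evaluation 4 fires outputs:finished on both.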
# ----------------------------------------------------------------------
async def test_loop_cancel(self):
"""Test loop canceling"""
# Check that a loop can be canceled.
        # We set up the loop to run for 6 iterations (indices 0 through 5), but cancel when we hit iteration 2.
#
# +--------+
# +------------->|COUNTER2|
# | finish +--------+
# +---------+ +----------+ +----------+ +-------+ +---------+
# | IMPULSE +-->| FOR-LOOP +--->| COMPARE +--->|BRANCH +--->| COUNTER1|
# +---------+ +----------+ +----------+ +-------+ +---------+
# ^ |
# | cancel |
# +------------------------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, for_loop, _, _, count_1, count_2), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ForLoop", "omni.graph.action.ForLoop"),
("Compare", "omni.graph.nodes.Compare"),
("Branch", "omni.graph.action.Branch"),
("Counter1", "omni.graph.action.Counter"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "ForLoop.inputs:execIn"),
("ForLoop.outputs:loopBody", "Branch.inputs:execIn"),
("ForLoop.outputs:index", "Compare.inputs:a"),
("ForLoop.outputs:finished", "Counter2.inputs:execIn"),
("Compare.outputs:result", "Branch.inputs:condition"),
("Branch.outputs:execFalse", "Counter1.inputs:execIn"),
("Branch.outputs:execTrue", "ForLoop.inputs:breakLoop"),
],
keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("Compare.inputs:b", 2, "int"),
("Compare.inputs:operation", "=="),
("ForLoop.inputs:stop", 6),
],
},
)
await controller.evaluate(graph)
# Trigger graph once.
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# Verify the loop body only counted 2 times, and finish once.
c_1 = og.Controller.get(og.Controller.attribute("outputs:count", count_1))
c_2 = og.Controller.get(og.Controller.attribute("outputs:count", count_2))
index = og.Controller.get(og.Controller.attribute("outputs:index", for_loop))
self.assertEqual(index, 2)
self.assertEqual(c_1, 2)
self.assertEqual(c_2, 1)
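        # Derivation of the expected values: the loop body fires for indices 0
        # and 1 (Compare is false, so Counter1 increments twice); at index 2
        # the Branch routes into breakLoop, the body never reaches Counter1,
        # and the loop exits through outputs:finished exactly once.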
# ----------------------------------------------------------------------
    async def test_loop_side_effects(self):
"""Test that nodes in loops can read results of external side effects"""
# +----------+ +---------+ +-------+ +--------------------+
# | OnImpulse+----->| ForLoop +----->| Add +------>| WritePrimAttribute |
# +----------+ +---------+ +-------+ +--------------------+
# ^
# +-----------------+ |
# |ReadPrimAttribute +---+
# +-----------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, _, prims, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Get", "omni.graph.nodes.ReadPrimAttribute"),
("ForLoop1", "omni.graph.action.ForLoop"),
("Add", "omni.graph.nodes.Add"),
("Set", "omni.graph.nodes.WritePrimAttribute"),
],
keys.CREATE_PRIMS: ("/World/Accumulator", {"acc": ("int", 0)}),
keys.SET_VALUES: [
("Get.inputs:name", "acc"),
("Get.inputs:usePath", True),
("Get.inputs:primPath", "/World/Accumulator"),
("ForLoop1.inputs:start", 0),
("ForLoop1.inputs:stop", 5),
("Set.inputs:name", "acc"),
("Set.inputs:usePath", True),
("Set.inputs:primPath", "/World/Accumulator"),
("OnImpulse.inputs:onlyPlayback", False),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "Set.inputs:execIn"),
("Get.outputs:value", "Add.inputs:a"),
("ForLoop1.outputs:value", "Add.inputs:b"),
("Add.outputs:sum", "Set.inputs:value"),
],
},
)
await controller.evaluate(graph)
# Trigger graph evaluation once.
# Should result in sum = 0 + 0 + 1 + 2 + 3 + 4.
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: [("OnImpulse.state:enableImpulse", True)]})
await controller.evaluate(graph)
self.assertEqual(prims[0].GetAttribute("acc").Get(), 10)
# ----------------------------------------------------------------------
async def test_nested_forloop(self):
"""Test nested ForLoop nodes"""
keys = og.Controller.Keys
controller = og.Controller()
(graph, (_, _, _, _, _, _, write_node), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Const", "omni.graph.nodes.ConstantInt2"),
("Add", "omni.graph.nodes.Add"),
("Branch", "omni.graph.action.Branch"),
("For", "omni.graph.action.ForLoop"),
("For2", "omni.graph.action.ForLoop"),
("Write1", "omni.graph.nodes.WritePrimAttribute"),
],
keys.CREATE_PRIMS: (
"/World/TestPrim",
{
"val1": ("int[2]", Gf.Vec2i(1, 1)),
},
),
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Const.inputs:value", [1, 2]),
("Write1.inputs:name", "val1"),
("Write1.inputs:primPath", "/World/TestPrim"),
("Write1.inputs:usePath", True),
("For.inputs:stop", 3),
("For2.inputs:start", 4),
("For2.inputs:step", 2),
("For2.inputs:stop", 10),
("Branch.inputs:condition", True),
],
keys.CONNECT: [
("OnTick.outputs:tick", "For.inputs:execIn"),
("For.outputs:loopBody", "Branch.inputs:execIn"),
("Branch.outputs:execTrue", "For2.inputs:execIn"),
("For2.outputs:loopBody", "Write1.inputs:execIn"),
("For2.outputs:value", "Add.inputs:a"),
("Const.inputs:value", "Add.inputs:b"),
("Add.outputs:sum", "Write1.inputs:value"),
],
},
)
await controller.evaluate(graph)
context = omni.usd.get_context()
stage = context.get_stage()
        # For2 should iterate over range(4, 10, 2), i.e. the values 4, 6, 8.
self.assertEqual(9, write_node.get_compute_count())
# [1, 2] + 8 = [9, 10].
self.assertListEqual([9, 10], list(stage.GetAttributeAtPath("/World/TestPrim.val1").Get()))
# ----------------------------------------------------------------------
async def test_om_63924(self):
"""Test OM-63924 bug is fixed"""
# The problem here was that if there was fan in to a node which was
# computed once and then totally unwound before the other history was
# processed, there would never be a deferred activation and so the 2nd
# compute would never happen. Instead we want to only unwind one history
# at a time to ensure each one is fully evaluated.
i = 2
class OnForEachEventPy:
"""Helper Python node that implements ForEach logic"""
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
"""Compute method"""
nonlocal i
inputs_go = node.get_attribute("inputs:go")
go_val = og.Controller.get(inputs_go)
if not go_val:
return True
if i > 0:
og.Controller.set(
node.get_attribute("outputs:execOut"), og.ExecutionAttributeState.ENABLED_AND_PUSH
)
og.Controller.set(node.get_attribute("outputs:syncValue"), i)
i -= 1
return True
@staticmethod
def get_node_type() -> str:
"""Get node type"""
return "omni.graph.test.OnForEachEventPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
"""Initialize node attributes"""
node_type.add_input(
"inputs:go",
"bool",
False,
)
node_type.add_output("outputs:execOut", "execution", True)
node_type.add_output("outputs:syncValue", "uint64", True)
return True
og.register_node_type(OnForEachEventPy, 1)
class NoOpPy:
"""Helper Python node that performs no internal operation"""
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
"""Compute method"""
og.Controller.set(node.get_attribute("outputs:execOut"), og.ExecutionAttributeState.ENABLED)
return True
@staticmethod
def get_node_type() -> str:
"""Get node type"""
return "omni.graph.test.NoOpPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
"""Initialize node attributes"""
node_type.add_input(
"inputs:execIn",
"execution",
True,
)
node_type.add_output("outputs:execOut", "execution", True)
return True
og.register_node_type(NoOpPy, 1)
controller = og.Controller()
keys = og.Controller.Keys
(graph, (for_each, _, _, _, _, no_op_2), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("PostProcessDispatcher", "omni.graph.test.OnForEachEventPy"),
("TSA1", "omni.graph.action.SyncGate"),
("TSA0", "omni.graph.action.SyncGate"),
("TestSyncAccum", "omni.graph.action.SyncGate"),
("TestPrimBbox", "omni.graph.test.NoOpPy"),
("NoOpPy2", "omni.graph.test.NoOpPy"),
],
keys.CONNECT: [
("PostProcessDispatcher.outputs:execOut", "TSA0.inputs:execIn"),
("PostProcessDispatcher.outputs:execOut", "TSA1.inputs:execIn"),
("TSA1.outputs:execOut", "TestSyncAccum.inputs:execIn"),
("TSA0.outputs:execOut", "TestPrimBbox.inputs:execIn"),
("TestPrimBbox.outputs:execOut", "TestSyncAccum.inputs:execIn"),
("TestSyncAccum.outputs:execOut", "NoOpPy2.inputs:execIn"),
("PostProcessDispatcher.outputs:syncValue", "TSA1.inputs:syncValue"),
("PostProcessDispatcher.outputs:syncValue", "TSA0.inputs:syncValue"),
("PostProcessDispatcher.outputs:syncValue", "TestSyncAccum.inputs:syncValue"),
],
},
)
og.Controller.set(controller.attribute("inputs:go", for_each), True)
await controller.evaluate(graph)
# Verify the final sync gate triggered due to being computed 2x.
exec_out = og.Controller.get(controller.attribute("outputs:execOut", no_op_2))
self.assertEqual(exec_out, og.ExecutionAttributeState.ENABLED)
# ----------------------------------------------------------------------
async def test_retrigger_latent(self):
"""Test that latent nodes can be re-triggered"""
want_debug = False
e_state = og.ExecutionAttributeState
tick_count = 0
boop_count = 0
exec_in_latent_count = 0
max_ticks = 20
class CancelTickerPy:
"""Helper node type which does latent ticking and can be canceled, and has an independent counter "boop" """
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
"""Compute method"""
nonlocal tick_count
nonlocal boop_count
nonlocal exec_in_latent_count
exec_in = node.get_attribute("inputs:execIn")
exec_in_val = og.Controller.get(exec_in)
cancel = node.get_attribute("inputs:cancel")
cancel_val = og.Controller.get(cancel)
boop = node.get_attribute("inputs:boop")
boop_val = og.Controller.get(boop)
if want_debug:
print(f"### {tick_count} execIn={exec_in_val} cancel={cancel_val} boop={boop_val}")
if cancel_val == e_state.ENABLED:
# Finish latent by cancel.
og.Controller.set(node.get_attribute("outputs:canceled"), e_state.LATENT_FINISH)
self.assertEqual(exec_in_val, e_state.DISABLED)
self.assertEqual(boop_val, e_state.DISABLED)
tick_count = 0
return True
if exec_in_val == e_state.ENABLED:
self.assertEqual(cancel_val, e_state.DISABLED)
self.assertEqual(boop_val, e_state.DISABLED)
if tick_count > 0:
# execIn triggered while in latent - should not be possible.
exec_in_latent_count += 1
else:
og.Controller.set(node.get_attribute("outputs:tick"), e_state.LATENT_PUSH)
return True
# We are ticking.
self.assertEqual(cancel_val, e_state.DISABLED)
tick_count += 1
if tick_count < max_ticks:
og.Controller.set(node.get_attribute("outputs:tick"), e_state.ENABLED)
else:
# Finish latent naturally.
og.Controller.set(node.get_attribute("outputs:execOut"), e_state.LATENT_FINISH)
tick_count = 0
if boop_val == e_state.ENABLED:
# We get here during latent ticking, if the boop input is enabled.
self.assertEqual(exec_in_val, e_state.DISABLED)
self.assertEqual(cancel_val, e_state.DISABLED)
boop_count += 1
return True
@staticmethod
def get_node_type() -> str:
"""Get node type"""
return "omni.graph.test.CancelTickerPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
"""Initialize node attributes"""
node_type.add_input(
"inputs:execIn",
"execution",
True,
)
node_type.add_input(
"inputs:cancel",
"execution",
True,
)
node_type.add_input(
"inputs:boop",
"execution",
True,
)
node_type.add_output("outputs:tick", "execution", True)
node_type.add_output("outputs:canceled", "execution", True)
node_type.add_output("outputs:execOut", "execution", True)
return True
og.register_node_type(CancelTickerPy, 1)
controller = og.Controller()
keys = og.Controller.Keys
(graph, (ticker, start, _, cancel, boop, _, _, counter), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("Ticker", "omni.graph.test.CancelTickerPy"),
("Start", "omni.graph.action.OnImpulseEvent"),
("Start2", "omni.graph.action.OnImpulseEvent"),
("Cancel", "omni.graph.action.OnImpulseEvent"),
("Boop", "omni.graph.action.OnImpulseEvent"),
("Once", "omni.graph.action.Once"),
("Once2", "omni.graph.action.Once"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("Start.outputs:execOut", "Ticker.inputs:execIn"),
("Start2.outputs:execOut", "Ticker.inputs:execIn"),
("Cancel.outputs:execOut", "Once.inputs:execIn"),
("Once.outputs:once", "Ticker.inputs:cancel"),
("Once.outputs:after", "Ticker.inputs:cancel"),
("Cancel.outputs:execOut", "Once2.inputs:execIn"),
("Boop.outputs:execOut", "Ticker.inputs:boop"),
("Once2.outputs:once", "Ticker.inputs:cancel"),
("Once2.outputs:after", "Ticker.inputs:cancel"),
("Ticker.outputs:tick", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("Start.inputs:onlyPlayback", False),
("Start2.inputs:onlyPlayback", False),
("Cancel.inputs:onlyPlayback", False),
("Boop.inputs:onlyPlayback", False),
],
},
)
# Cancel, check nothing happens.
og.Controller.set(controller.attribute("state:enableImpulse", cancel), True)
await controller.evaluate(graph)
exec_out = og.Controller.get(controller.attribute("outputs:tick", ticker))
self.assertEqual(exec_out, e_state.DISABLED)
# Start ticking.
og.Controller.set(controller.attribute("state:enableImpulse", start), True)
await controller.evaluate(graph) # Starts latent state.
await controller.evaluate(graph) # Tick 1
self.assertEqual(tick_count, 1)
# Verify the tick has started.
exec_out = og.Controller.get(controller.attribute("outputs:tick", ticker))
self.assertEqual(exec_out, e_state.ENABLED)
await controller.evaluate(graph) # Tick 2
self.assertEqual(tick_count, 2)
exec_out = og.Controller.get(controller.attribute("outputs:tick", ticker))
self.assertEqual(exec_out, e_state.ENABLED)
await controller.evaluate(graph) # Tick 3
self.assertEqual(tick_count, 3)
# Boop - node keeps ticking.
og.Controller.set(controller.attribute("state:enableImpulse", boop), True)
# Boop will trigger a compute, which increments boop + ticks AND the normal latent tick.
await controller.evaluate(graph)
self.assertEqual(boop_count, 1)
self.assertEqual(tick_count, 5)
# Now check that the next tick can run WITHOUT inputs:boop being high.
await controller.evaluate(graph)
self.assertEqual(boop_count, 1) # No change in boop count (OM-64856).
self.assertEqual(tick_count, 6)
# Now check that we can't re-trigger execIn.
self.assertEqual(exec_in_latent_count, 0)
og.Controller.set(controller.attribute("state:enableImpulse", start), True)
# Start will not trigger any compute because the node is latent.
await controller.evaluate(graph)
self.assertEqual(exec_in_latent_count, 0)
self.assertEqual(boop_count, 1)
self.assertEqual(tick_count, 7)
# Unset the impulse.
og.Controller.set(controller.attribute("state:enableImpulse", start), False)
# Now check the normal tick proceeds as normal.
await controller.evaluate(graph)
self.assertEqual(boop_count, 1)
self.assertEqual(tick_count, 8)
# Cancel.
counter_attr = controller.attribute("outputs:count", counter)
count_0 = og.Controller.get(counter_attr)
og.Controller.set(controller.attribute("state:enableImpulse", cancel), True)
await controller.evaluate(graph) # Latent finish.
await controller.evaluate(graph) # No action.
await controller.evaluate(graph) # No action.
count_1 = og.Controller.get(counter_attr)
self.assertEqual(count_0 + 1, count_1)
# ----------------------------------------------------------------------
async def test_simple_expression(self):
"""Test ActionGraph simple expression of add nodes"""
keys = og.Controller.Keys
controller = og.Controller()
(graph, _, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Const", "omni.graph.nodes.ConstantDouble3"),
("Add", "omni.graph.nodes.Add"),
("Add2", "omni.graph.nodes.Add"),
("Add3", "omni.graph.nodes.Add"),
("PostAdd", "omni.graph.nodes.Add"),
("Write1", "omni.graph.nodes.WritePrimAttribute"),
("Write2", "omni.graph.nodes.WritePrimAttribute"),
("Write3", "omni.graph.nodes.WritePrimAttribute"),
],
keys.CREATE_PRIMS: (
"/World/TestPrim",
{
"val1": ("double[3]", Gf.Vec3d(1, 1, 1)),
"val2": ("double[3]", Gf.Vec3d(2, 2, 2)),
"val3": ("double[3]", Gf.Vec3d(2, 2, 2)),
},
),
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Const.inputs:value", [1, 2, 3]),
("Write1.inputs:name", "val1"),
("Write1.inputs:primPath", "/World/TestPrim"),
("Write1.inputs:usePath", True),
("Write2.inputs:name", "val2"),
("Write2.inputs:primPath", "/World/TestPrim"),
("Write2.inputs:usePath", True),
("Write3.inputs:name", "val3"),
("Write3.inputs:primPath", "/World/TestPrim"),
("Write3.inputs:usePath", True),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Write1.inputs:execIn"),
("Write1.outputs:execOut", "Write2.inputs:execIn"),
("Write2.outputs:execOut", "Write3.inputs:execIn"),
("Const.inputs:value", "Add.inputs:a"),
("Const.inputs:value", "Add.inputs:b"),
("Const.inputs:value", "Add2.inputs:a"),
("Const.inputs:value", "Add2.inputs:b"),
("Add.outputs:sum", "Add3.inputs:a"),
("Add2.outputs:sum", "Add3.inputs:b"),
("Add3.outputs:sum", "Write1.inputs:value"),
("Add3.outputs:sum", "Write2.inputs:value"),
("Add3.outputs:sum", "PostAdd.inputs:a"),
("Add3.outputs:sum", "PostAdd.inputs:b"),
("PostAdd.outputs:sum", "Write3.inputs:value"),
],
},
)
await controller.evaluate(graph)
context = omni.usd.get_context()
stage = context.get_stage()
await omni.kit.app.get_app().next_update_async()
self.assertListEqual([4, 8, 12], list(stage.GetAttributeAtPath("/World/TestPrim.val1").Get()))
self.assertListEqual([4, 8, 12], list(stage.GetAttributeAtPath("/World/TestPrim.val2").Get()))
self.assertListEqual([8, 16, 24], list(stage.GetAttributeAtPath("/World/TestPrim.val3").Get()))
# ----------------------------------------------------------------------
async def test_stateful_flowcontrol_evaluation(self):
"""Test that stateful flow control nodes are fully evaluated"""
# b
# +----------+ +---------+
# +--->| Sequence +-->|Counter1 |
# | +----------+ +---------+
# +-----------+ |
# | OnImpulse +-+
# +-----------+ |
# | +----------+ +----------+
# +--->| ForLoop1 +-->| Counter2 |
# +----------+ +----------+
# finished
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, counter1_node, _, counter2_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Sequence", "omni.graph.action.Sequence"),
("Counter1", "omni.graph.action.Counter"),
("ForLoop1", "omni.graph.action.ForLoop"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "Sequence.inputs:execIn"),
("Sequence.outputs:b", "Counter1.inputs:execIn"),
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:finished", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [("OnImpulse.inputs:onlyPlayback", False), ("ForLoop1.inputs:stop", 10)],
},
)
await controller.evaluate(graph)
# Trigger graph once.
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# Verify that counter was called in spite of sequence 'a' being disconnected.
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter1_node)), 1)
        # Verify that the counter was called even though loopBody has no downstream
        # connection - the execution evaluator still has to run the loop to completion
        # (all 10 iterations) and then fire the finished output.
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 1)
# ----------------------------------------------------------------------
async def test_unresolve_on_disconnect(self):
"""Tests unresolving attribs when action node is disconnected"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, for_each_node, _, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("ForEach", "omni.graph.action.ForEach"),
("IntArrayPrim", "omni.graph.nodes.ReadPrimAttribute"),
("IntSinkPrim", "omni.graph.nodes.WritePrimAttribute"),
],
keys.CREATE_PRIMS: [
("/World/IntArray", {"myintarray": ("int[]", range(10))}),
("/World/IntSink", {"myint": ("int", 0)}),
],
keys.CONNECT: [
("OnTick.outputs:tick", "ForEach.inputs:execIn"),
("IntArrayPrim.outputs:value", "ForEach.inputs:arrayIn"),
("ForEach.outputs:element", "IntSinkPrim.inputs:value"),
],
keys.SET_VALUES: [
("IntArrayPrim.inputs:name", "myintarray"),
("IntArrayPrim.inputs:primPath", "/World/IntArray"),
("IntArrayPrim.inputs:usePath", True),
("IntSinkPrim.inputs:name", "myint"),
("IntSinkPrim.inputs:primPath", "/World/IntSink"),
("IntSinkPrim.inputs:usePath", True),
("OnTick.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
array_attr = controller.attribute("inputs:arrayIn", for_each_node)
element_attr = controller.attribute("outputs:element", for_each_node)
self.assertEqual(element_attr.get_resolved_type(), og.Type(og.BaseDataType.INT, 1, 0))
# When we disconnect all data connections, they should unresolve even though we still
# have the execution connection in place.
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.DISCONNECT: [
("IntArrayPrim.outputs:value", "ForEach.inputs:arrayIn"),
("ForEach.outputs:element", "IntSinkPrim.inputs:value"),
]
},
)
await controller.evaluate(graph)
self.assertEqual(array_attr.get_resolved_type(), og.Type(og.BaseDataType.UNKNOWN))
self.assertEqual(element_attr.get_resolved_type(), og.Type(og.BaseDataType.UNKNOWN))
    # ----------------------------------------------------------------------
    async def test_actiongraph_abi(self):
"""Tests IActionGraph ABI functions"""
i_ag = get_interface()
g_exec_out = 0
class State(Enum):
ENABLED = auto()
ENABLED_AND_PUSH = auto()
START = auto()
END = auto()
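        # The helper node below drives the IActionGraph ABI directly. For reference,
        # the ABI calls exercised (via omni.graph.action.get_interface()) are:
        #   i_ag.set_execution_enabled(attr)             - enable a downstream exec output
        #   i_ag.set_execution_enabled_and_pushed(attr)  - enable an output and re-enter this node afterwards
        #   i_ag.start_latent_state() / i_ag.end_latent_state() - begin/finish multi-frame (latent) evaluation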
class _TestActionGraphPy:
"""Helper node to test behavior"""
class _State:
val = None
instance_states = {}
@staticmethod
def initialize(context, node):
_TestActionGraphPy.instance_states[node] = _TestActionGraphPy._State()
@staticmethod
def compute(_: og.GraphContext, node: og.Node) -> bool:
"""Compute method"""
nonlocal g_exec_out
if not i_ag.get_latent_state():
self.assertTrue(i_ag.get_execution_enabled("inputs:execIn"))
self.assertFalse(i_ag.get_execution_enabled("inputs:execUnused"))
self.assertFalse(i_ag.get_execution_enabled("inputs:boolUnused"))
state = _TestActionGraphPy.instance_states[node]
if state.val == State.ENABLED_AND_PUSH:
# Must be re-entering
i_ag.set_execution_enabled("outputs:finished_from_pushed")
state.val = None
return True
if state.val == State.START:
# Must be in latent state tick
self.assertTrue(i_ag.get_latent_state())
i_ag.end_latent_state()
i_ag.set_execution_enabled("outputs:finished_from_latent")
state.val = None
return True
if g_exec_out == State.ENABLED:
i_ag.set_execution_enabled("outputs:execOut")
elif g_exec_out == State.ENABLED_AND_PUSH:
i_ag.set_execution_enabled_and_pushed("outputs:execOut")
elif g_exec_out == State.START:
self.assertFalse(i_ag.get_latent_state())
i_ag.start_latent_state()
elif g_exec_out == State.END:
i_ag.end_latent_state()
state.val = g_exec_out
return True
@staticmethod
def get_node_type() -> str:
"""Get node type"""
return "omni.graph.action._TestActionGraphPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
"""Initialize node attributes"""
node_type.add_input("inputs:execIn", "execution", True)
node_type.add_input("inputs:execUnused", "execution", True)
node_type.add_input("inputs:boolUnused", "bool", True)
node_type.add_output("outputs:execOut", "execution", True)
node_type.add_output("outputs:finished_from_latent", "execution", True)
node_type.add_output("outputs:finished_from_pushed", "execution", True)
og.register_node_type(_TestActionGraphPy, 1)
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, _, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Test", "omni.graph.action._TestActionGraphPy"),
("CounterOut", "omni.graph.action.Counter"),
("CounterLatent", "omni.graph.action.Counter"),
("CounterPushed", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Test.inputs:execIn"),
("Test.outputs:execOut", "CounterOut.inputs:execIn"),
("Test.outputs:finished_from_latent", "CounterLatent.inputs:execIn"),
("Test.outputs:finished_from_pushed", "CounterPushed.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterOut.outputs:count"), 0)
g_exec_out = State.ENABLED
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterOut.outputs:count"), 1)
g_exec_out = State.ENABLED_AND_PUSH
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterOut.outputs:count"), 2)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterPushed.outputs:count"), 1)
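        # ENABLED_AND_PUSH fired execOut and then re-entered the node, which fired
        # finished_from_pushed - hence both CounterOut and CounterPushed advanced.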
g_exec_out = State.START
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterOut.outputs:count"), 2)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterPushed.outputs:count"), 1)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterLatent.outputs:count"), 0)
g_exec_out = State.END
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterOut.outputs:count"), 2)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterPushed.outputs:count"), 1)
self.assertEqual(og.Controller.get(f"{self.TEST_GRAPH_PATH}/CounterLatent.outputs:count"), 1)
with ogts.ExpectedError():
with self.assertRaises(RuntimeError):
i_ag.start_latent_state()
# ------------------------------------------------------------------------
async def test_node_self_destruction(self):
"""Test that we don't crash if a very bad node causes a resync of the stage"""
# Executing this node may cause errors and fail the test as the underlying
# authoring node is destroyed while the task is executing. We just verify that only
# the expected error is generated.
class _TestBadNodePy:
@staticmethod
def compute(_: og.GraphContext, node: og.Node) -> bool:
context = omni.usd.get_context()
stage = context.get_stage()
                # Removing this node's own prim resyncs the stage mid-compute.
                stage.RemovePrim(node.get_prim_path())
                return True
@staticmethod
def get_node_type() -> str:
"""Get node type"""
return "omni.graph.action._TestBadNodePy"
@staticmethod
def initialize_type(node_type: og.NodeType):
"""Initialize node attributes"""
node_type.add_input("inputs:execIn", "execution", True)
node_type.add_output("outputs:execOut", "execution", True)
og.register_node_type(_TestBadNodePy, 1)
controller = og.Controller()
keys = og.Controller.Keys
(graph, _, _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Test", "omni.graph.action._TestBadNodePy"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Test.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
},
)
with ogts.ExpectedError(): # Authoring node for Test was lost
await controller.evaluate(graph)
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_nodes_01.py
"""Action Graph Node Tests, Part 1"""
import time
import carb
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.app
import omni.kit.test
import omni.usd
from omni.graph.core import ThreadsafetyTestUtils
from pxr import Gf, OmniGraphSchemaTools, Sdf, Vt
# ======================================================================
class TestActionGraphNodes(ogts.OmniGraphTestCase):
"""Tests action graph node functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
keys = og.Controller.Keys
E = og.ExecutionAttributeState.ENABLED
D = og.ExecutionAttributeState.DISABLED
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
# -----------------------------------------------------------------------
async def test_addprimrelationship_node(self):
"""Test AddPrimRelationship node"""
# Check that we can add a relationship to a prim and get that relationship.
#
# +---------+ +---------------------+
# | ONTICK +-->| AddPrimRelationship +
# +---------+ +---------------------+
#
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("AddRel", "omni.graph.action.AddPrimRelationship"),
],
self.keys.CREATE_PRIMS: [
("/Test", {}),
("/Target", {}),
],
self.keys.CONNECT: [("OnTick.outputs:tick", "AddRel.inputs:execIn")],
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("AddRel.inputs:path", "/Test"),
("AddRel.inputs:name", "rel"),
("AddRel.inputs:target", "/Target"),
],
},
)
prim = stage.GetPrimAtPath("/Test")
await og.Controller.evaluate()
rel = prim.GetRelationship("rel")
targets = rel.GetTargets()
self.assertEqual(len(targets), 1)
self.assertTrue(str(targets[0]) == "/Target")
# ----------------------------------------------------------------------
# The Branch node has a built-in test construct in its .ogn file located at ../../nodes/OgnBranch.ogn
# (relative to the source location of the currently-opened testing script) AND is used in other testing
# methods, so we skip adding extra node-specific tests for it here.
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_counter_node(self, test_instance_id: int = 0):
"""Test Counter node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (ontick_node, counter_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: ("OnTick.outputs:tick", "Counter.inputs:execIn"),
},
)
# Obtain necessary attributes.
in_exec_attr = counter_node.get_attribute("inputs:execIn")
in_reset_attr = counter_node.get_attribute("inputs:reset")
state_cnt_attr = counter_node.get_attribute("state:count")
out_exec_attr = counter_node.get_attribute("outputs:execOut")
out_cnt_attr = counter_node.get_attribute("outputs:count")
out_tick_attr = ontick_node.get_attribute("outputs:tick")
# Check that the counter node gets correctly incremented when executing.
self.assertEqual(state_cnt_attr.get(), 0)
self.assertEqual(out_cnt_attr.get(), 0)
self.assertEqual(out_exec_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 1)
self.assertEqual(out_cnt_attr.get(), 1)
self.assertEqual(out_exec_attr.get(), self.E)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 2)
self.assertEqual(out_cnt_attr.get(), 2)
self.assertEqual(out_exec_attr.get(), self.E)
# Check that the counter node doesn't increment when not executing.
og.Controller.disconnect(
out_tick_attr,
in_exec_attr,
)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 2)
self.assertEqual(out_cnt_attr.get(), 2)
self.assertEqual(out_exec_attr.get(), self.E)
# Check that the reset flag for the Counter node instance works correctly when
# inputs:execIn is set to 0 (i.e. when the Counter node is NOT supposed to be
# executing).
og.Controller.connect(out_tick_attr, in_reset_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(state_cnt_attr.get(), 0)
self.assertEqual(out_cnt_attr.get(), 0)
self.assertEqual(out_exec_attr.get(), self.E)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_delay_node(self, test_instance_id: int = 0):
"""Test Delay node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (on_impulse_node, _, counter_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Delay", "omni.graph.action.Delay"),
("Counter", "omni.graph.action.Counter"),
],
self.keys.CONNECT: [
("OnImpulse.outputs:execOut", "Delay.inputs:execIn"),
("Delay.outputs:finished", "Counter.inputs:execIn"),
],
self.keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("Delay.inputs:duration", 0.01),
],
},
)
# Obtain necessary attributes.
out_cnt_attr = counter_node.get_attribute("outputs:count")
state_enable_impulse_attr = on_impulse_node.get_attribute("state:enableImpulse")
# Trigger the graph(s) once.
state_enable_impulse_attr.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# Downstream execution is delayed, so the counter node won't be incremented.
self.assertEqual(out_cnt_attr.get(), 0)
# Wait to ensure that the delay finishes before checking that the counter node
# has indeed been incremented.
time.sleep(0.02)
yield ThreadsafetyTestUtils.EVALUATION_WAIT_FRAME # Yielding to compute by waiting for the next app frame.
self.assertEqual(out_cnt_attr.get(), 1)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_flipflop_node(self, test_instance_id: int = 0):
"""Test FlipFlop node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(graph, (_, flip_flop_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FlipFlop", "omni.graph.action.FlipFlop"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: (
"OnTick.outputs:tick",
"FlipFlop.inputs:execIn",
),
},
)
# Obtain necessary attributes.
out_a_attr = flip_flop_node.get_attribute("outputs:a")
out_b_attr = flip_flop_node.get_attribute("outputs:b")
out_isa_attr = flip_flop_node.get_attribute("outputs:isA")
# First eval, 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# Second eval 'b'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.D)
self.assertEqual(out_b_attr.get(), self.E)
self.assertFalse(out_isa_attr.get())
# Third eval 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# Test that non-usd-backed FlipFlop nodes correctly
# set their execution-type attributes.
# Make sure that the node paths are prefaced with the path to the
# graph that they should reside in (so that each node creation command
# can be processed to produce unique nodes for each test instance)!
_, on_tick_no_usd = og.cmds.CreateNode(
graph=graph,
node_path=f"{graph_path}/OnTickNoUSD",
node_type="omni.graph.action.OnTick",
create_usd=False,
)
_, flip_flop_no_usd = og.cmds.CreateNode(
graph=graph,
node_path=f"{graph_path}/FlipFlopNoUSD",
node_type="omni.graph.action.FlipFlop",
create_usd=False,
)
# Obtain necessary attributes.
out_a_attr = flip_flop_no_usd.get_attribute("outputs:a")
out_b_attr = flip_flop_no_usd.get_attribute("outputs:b")
out_isa_attr = flip_flop_no_usd.get_attribute("outputs:isA")
on_tick_no_usd.get_attribute("inputs:onlyPlayback").set(False)
# Make sure that the node attribute paths are prefaced with the graph
# path that they reside in (so that each instanced node attribute can
# be uniquely processed)!
og.cmds.ConnectAttrs(
src_attr=f"{graph_path}/OnTickNoUSD.outputs:tick",
dest_attr=f"{graph_path}/FlipFlopNoUSD.inputs:execIn",
modify_usd=False,
)
# First eval, 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# Second eval 'b'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.D)
self.assertEqual(out_b_attr.get(), self.E)
self.assertFalse(out_isa_attr.get())
# Third eval 'a'.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_a_attr.get(), self.E)
self.assertEqual(out_b_attr.get(), self.D)
self.assertTrue(out_isa_attr.get())
# ----------------------------------------------------------------------
# The ForEach node has a built-in test construct in its .ogn file located at ../../nodes/OgnForEach.ogn
# (relative to the source location of the currently-opened testing script), so we skip adding extra
# node-specific tests for it here.
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_forloop_node(self, test_instance_id: int = 0):
"""Test ForLoop node"""
context = omni.usd.get_context()
stage = context.get_stage()
# Since we want to use the same prim across all graph instances in the
# thread-safety test, we add it to the threading cache like so:
prim = ThreadsafetyTestUtils.add_to_threading_cache(test_instance_id, stage.DefinePrim("/World/TestPrim"))
ThreadsafetyTestUtils.add_to_threading_cache(
test_instance_id,
prim.CreateAttribute("val1", Sdf.ValueTypeNames.Int2, False).Set(Gf.Vec2i(1, 1)),
)
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (_, _, _, _, _, _, write_node, finish_counter), _, _) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Const", "omni.graph.nodes.ConstantInt2"),
("StopNum", "omni.graph.nodes.ConstantInt"),
("Add", "omni.graph.nodes.Add"),
("Branch", "omni.graph.action.Branch"),
("For", "omni.graph.action.ForLoop"),
("Write1", "omni.graph.nodes.WritePrimAttribute"),
("FinishCounter", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Const.inputs:value", [1, 2]),
("StopNum.inputs:value", 3),
("Write1.inputs:name", "val1"),
("Write1.inputs:primPath", "/World/TestPrim"),
("Write1.inputs:usePath", True),
("Branch.inputs:condition", True),
],
self.keys.CONNECT: [
("OnTick.outputs:tick", "For.inputs:execIn"),
("StopNum.inputs:value", "For.inputs:stop"),
("For.outputs:loopBody", "Branch.inputs:execIn"),
("For.outputs:finished", "FinishCounter.inputs:execIn"),
("Branch.outputs:execTrue", "Write1.inputs:execIn"),
("For.outputs:value", "Add.inputs:a"),
("Const.inputs:value", "Add.inputs:b"),
("Add.outputs:sum", "Write1.inputs:value"),
],
},
)
# Evaluate the graph(s).
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
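        # The loop runs indices 0..2 (stop = 3); each iteration writes index + [1, 2],
        # so the last write is 2 + [1, 2] = [3, 4] - three writes in total, plus a
        # single "finished" pulse into FinishCounter.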
self.assertListEqual([3, 4], list(stage.GetAttributeAtPath("/World/TestPrim.val1").Get()))
self.assertEqual(3, write_node.get_compute_count())
self.assertEqual(1, finish_counter.get_compute_count())
    # ----------------------------------------------------------------------
    # This test makes sure that a ForEach loop writing arrays to different prims
    # works as expected (OM-84129)
    async def test_foreach_node_write_multiple_prim(self):
"""Test the foreach node writing arrays to output prims"""
og.Controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
(_, _, (prim1, prim2), _) = og.Controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_PRIMS: [
("/World/Prim1", {"graph_output": ("Int[]", [])}),
("/World/Prim2", {"graph_output": ("Int[]", [])}),
],
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("For", "omni.graph.action.ForEach"),
("Write", "omni.graph.nodes.WritePrimAttribute"),
("MakeArray", "omni.graph.nodes.ConstructArray"),
],
self.keys.SET_VALUES: [
("For.inputs:arrayIn", {"type": "token[]", "value": ["/World/Prim1", "/World/Prim2"]}),
("OnTick.inputs:onlyPlayback", False),
("Write.inputs:name", "graph_output"),
("Write.inputs:usePath", True),
("Write.inputs:usdWriteBack", True),
("MakeArray.inputs:arraySize", 1),
],
self.keys.CONNECT: [
("OnTick.outputs:tick", "For.inputs:execIn"),
("For.outputs:loopBody", "Write.inputs:execIn"),
("For.outputs:element", "Write.inputs:primPath"),
("For.outputs:arrayIndex", "MakeArray.inputs:input0"),
("MakeArray.outputs:array", "Write.inputs:value"),
],
},
)
await omni.kit.app.get_app().next_update_async()
prim1_out = prim1.GetAttribute("graph_output")
prim2_out = prim2.GetAttribute("graph_output")
self.assertEqual(prim1_out.Get(), Vt.IntArray([0]))
self.assertEqual(prim2_out.Get(), Vt.IntArray([1]))
# ----------------------------------------------------------------------
# The Gate node has a built-in test construct in its .ogn file located at ../../nodes/OgnGate.ogn
# (relative to the source location of the currently-opened testing script) AND is used in other
# testing methods, so we skip adding extra node-specific tests for it here.
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_multigate_node(self, test_instance_id: int = 0):
"""Test Multigate node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
(_, (_, multigate_node), _, _) = og.Controller.edit(
{"graph_path": graph_path, "evaluator_name": "execution"},
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Multigate", "omni.graph.action.Multigate"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: [
("OnTick.outputs:tick", "Multigate.inputs:execIn"),
],
},
)
# Add 5 extra outputs to the Multigate node.
for i in range(1, 6):
og.Controller.create_attribute(
multigate_node,
f"outputs:output{i}",
og.Type(og.BaseDataType.UINT, 1, 0, og.AttributeRole.EXECUTION),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT,
)
# Obtain necessary attributes.
out_attr_0 = multigate_node.get_attribute("outputs:output0")
out_attr_1 = multigate_node.get_attribute("outputs:output1")
out_attr_2 = multigate_node.get_attribute("outputs:output2")
out_attr_3 = multigate_node.get_attribute("outputs:output3")
out_attr_4 = multigate_node.get_attribute("outputs:output4")
out_attr_5 = multigate_node.get_attribute("outputs:output5")
# Check that the Multigate node correctly cycles through each of its outputs.
# Note that we trigger an execution through the Multigate node via the OnTick node,
# whose onlyPlayback input we've set to False in order to trigger an execution each
# time we evaluate the graph(s).
for i in range(0, 6):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 0) % 6}").get(), self.E)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 1) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 2) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 3) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 4) % 6}").get(), self.D)
self.assertEqual(multigate_node.get_attribute(f"outputs:output{(i + 5) % 6}").get(), self.D)
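        # For reference, the round-robin behavior verified above can be modeled by this
        # minimal sketch (an illustrative helper only, not part of the Multigate API):
        def _next_enabled_pin(current: int, num_pins: int) -> int:
            # Exactly one output pin is enabled per execution, advancing in order.
            return (current + 1) % num_pins

        self.assertEqual(_next_enabled_pin(5, 6), 0)  # pin 5 wraps back to pin 0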
# Next try removing some output attributes during evaluation and test if the
# Multigate node correctly cycles through.
for _ in range(4):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
self.assertEqual(out_attr_4.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
multigate_node.remove_attribute("outputs:output4")
        # The Multigate node cycles back to 0 instead of going to 5 because it thinks it has
        # reached the end of its outputs list (i.e. it expects to jump from pin 3 to pin 4, but
        # since that pin no longer exists it wraps back to 0 rather than continuing on to 5).
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.E)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
# Further showing that executing 4 times brings us back to pin 0 rather than pin 5.
for _ in range(4):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.E)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
# Execute the graph(s) once, then remove the currently-enabled output pin. The Multigate node will think
# that it's reached the end of the outputs list, and cycle back. Because we removed pin 1, it'll go
# back to pin 0 and never cycle through the other outputs since it cannot make the jump from
# pin 0 to 2 (as mentioned previously).
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
multigate_node.remove_attribute("outputs:output1")
for _ in range(3):
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_attr_0.get(), self.E)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.D)
self.assertEqual(out_attr_5.get(), self.D)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_multisequence_node(self, test_instance_id: int = 0):
"""Test Multisequence node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(
_,
(on_tick_node, multisequence_node, counter0_node, counter1_node, counter2_node),
_,
_,
) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Multisequence", "omni.graph.action.Multisequence"),
("Counter0", "omni.graph.action.Counter"),
("Counter1", "omni.graph.action.Counter"),
("Counter2", "omni.graph.action.Counter"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: [
("OnTick.outputs:tick", "Multisequence.inputs:execIn"),
],
},
)
# Add 3 extra outputs to the Multisequence node.
for j in range(1, 4):
og.Controller.create_attribute(
multisequence_node,
f"outputs:output{j}",
og.Type(og.BaseDataType.UINT, 1, 0, og.AttributeRole.EXECUTION),
og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT,
)
# Obtain necessary attributes.
out_attr_0 = multisequence_node.get_attribute("outputs:output0")
out_attr_1 = multisequence_node.get_attribute("outputs:output1")
out_attr_2 = multisequence_node.get_attribute("outputs:output2")
out_attr_3 = multisequence_node.get_attribute("outputs:output3")
in_exec_attr_0 = counter0_node.get_attribute("inputs:execIn")
in_exec_attr_1 = counter1_node.get_attribute("inputs:execIn")
in_exec_attr_2 = counter2_node.get_attribute("inputs:execIn")
out_cnt_attr_0 = counter0_node.get_attribute("outputs:count")
out_cnt_attr_1 = counter1_node.get_attribute("outputs:count")
out_cnt_attr_2 = counter2_node.get_attribute("outputs:count")
in_onlyplayback_attr = on_tick_node.get_attribute("inputs:onlyPlayback")
# Connect Multisequence node output attributes to the counter nodes.
og.Controller.connect(out_attr_0, in_exec_attr_0)
og.Controller.connect(out_attr_2, in_exec_attr_1)
og.Controller.connect(out_attr_1, in_exec_attr_2)
# Check that the Multisequence node correctly executes through its outputs when input
# execution is enabled via the OnTick node. This is done by checking whether
# each counter has been incremented by 1, and if the last output pin on the
# Multisequence node remains enabled (regardless of the fact that it's not connected
# downstream).
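        # For reference (a sketch of the expected semantics, not the node's implementation):
        #   for pin in (output0, output1, ..., outputN):
        #       enable(pin)  # each downstream chain runs to completion before the next pin fires
        # The last-fired pin is left in the enabled state once the execution completes.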
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 1)
self.assertEqual(out_cnt_attr_1.get(), 1)
self.assertEqual(out_cnt_attr_2.get(), 1)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.D)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
# Connect the Counter2 node to another Multisequence output pin.
og.Controller.connect(out_attr_3, in_exec_attr_2)
# Once again evaluate the graph(s). In this situation the Counter2 node should be incremented twice
# (since it's connected to 2 separate Multisequence output pins). Also Multisequence output pins
# 1 AND 3 should both be enabled by the end of the execution; this is because pin 3 would
# typically be the last output that gets enabled, but because pin 3 shares a downstream
# node with pin 1 (that node being Counter2), both outputs need to be enabled by the end.
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 2)
self.assertEqual(out_cnt_attr_1.get(), 2)
self.assertEqual(out_cnt_attr_2.get(), 3)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.E)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
# Set the OnTick node to only trigger downstream execution when playback is enabled; check
# that in this situation the Multisequence node correctly skips executing through its outputs
# (i.e. that the Counter nodes don't get incremented). The state of the Multisequence's output
# pins should not have changed since the last graph evaluation.
in_onlyplayback_attr.set(True)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 2)
self.assertEqual(out_cnt_attr_1.get(), 2)
self.assertEqual(out_cnt_attr_2.get(), 3)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.E)
self.assertEqual(out_attr_2.get(), self.D)
self.assertEqual(out_attr_3.get(), self.E)
# Try removing an output attribute from the Multisequence node, check that all other
# outputs that come before in the list get triggered. In this example we remove outputs2,
# which means that outputs3 won't get triggered at all (as evidenced by the fact that the
# Counter2 node only gets incremented once by output1).
og.Controller.disconnect(out_attr_2, in_exec_attr_1)
multisequence_node.remove_attribute("outputs:output2")
in_onlyplayback_attr.set(False)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_cnt_attr_0.get(), 3)
self.assertEqual(out_cnt_attr_1.get(), 2)
self.assertEqual(out_cnt_attr_2.get(), 4)
self.assertEqual(out_attr_0.get(), self.D)
self.assertEqual(out_attr_1.get(), self.E)
self.assertEqual(out_attr_3.get(), self.D)
# ----------------------------------------------------------------------
@ThreadsafetyTestUtils.make_threading_test
def test_once_node(self, test_instance_id: int = 0):
"""Test Once node"""
# Instance a test graph setup. Note that we append the graph path with the test_instance_id
# so that the graph can be uniquely identified in the thread-safety test!
graph_path = self.TEST_GRAPH_PATH + str(test_instance_id)
og.Controller.create_graph({"graph_path": graph_path, "evaluator_name": "execution"})
(_, (ontick_node, once_node), _, _,) = og.Controller.edit(
graph_path,
{
self.keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Once", "omni.graph.action.Once"),
],
self.keys.SET_VALUES: [("OnTick.inputs:onlyPlayback", False)],
self.keys.CONNECT: ("OnTick.outputs:tick", "Once.inputs:execIn"),
},
)
# Obtain necessary attributes.
in_exec_attr = once_node.get_attribute("inputs:execIn")
in_reset_attr = once_node.get_attribute("inputs:reset")
out_once_attr = once_node.get_attribute("outputs:once")
out_after_attr = once_node.get_attribute("outputs:after")
out_tick_attr = ontick_node.get_attribute("outputs:tick")
# Check that the Once node controls flow of execution by passing flow
# differently the first time it's executed compared to all subsequent
# executions.
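        # For reference, the Once node's expected gating matches this minimal sketch
        # (illustrative only; the real node keeps its "fired" flag in per-node state):
        def _once_outputs(fired: bool) -> tuple:
            # Returns (which output fires, updated state flag).
            return ("after", True) if fired else ("once", True)

        self.assertEqual(_once_outputs(False), ("once", True))
        self.assertEqual(_once_outputs(True), ("after", True))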
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.E)
self.assertEqual(out_after_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.E)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.E)
# Check that the reset flag works correctly when inputs:execIn is set to 0 (i.e. when
# the Once node is NOT supposed to be executing).
og.Controller.disconnect(out_tick_attr, in_exec_attr)
og.Controller.connect(out_tick_attr, in_reset_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.D)
og.Controller.disconnect(out_tick_attr, in_reset_attr)
og.Controller.connect(out_tick_attr, in_exec_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.E)
self.assertEqual(out_after_attr.get(), self.D)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
self.assertEqual(out_once_attr.get(), self.D)
self.assertEqual(out_after_attr.get(), self.E)
# Check that when both the execIn and reset input attributes get triggered, the latter
# overrides the former and execution flow does not pass through outputs:after.
# FIXME: Something about the 2nd connection here is messing up the data model such
# that inputs:reset is being read as 0 inside the node
og.Controller.connect(out_tick_attr, in_reset_attr)
yield # Yielding to wait for compute to happen across all graph instances before continuing the test.
# self.assertEqual(out_once_attr.get(), self.D)
# self.assertEqual(out_after_attr.get(), self.D)
# ----------------------------------------------------------------------
# NOTE: Even though the OnClosing node is threadsafe (its compute method is very simple),
# we don't adapt the below test to check for thread-safety conditions because it relies
# on other nodes (omni.graph.action.SendCustomEvent and omni.graph.nodes.GraphTarget)
# which are NOT threadsafe.
async def test_onclosing_node(self):
"""Test OnClosing node"""
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# Testing OnClosing is tricky because OG is being destroyed when it happens -
# so test by sending a custom event when the network is triggered
# and then checking if we got that event.
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
name = "omni.graph.action." + event_name
return carb.events.type_from_string(name)
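        # e.g. registered_event_name("foo") yields the carb event type for
        # "omni.graph.action.foo", the name under which SendCustomEvent emits
        # (as this test's subscription below confirms).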
got_event = [0]
def on_event(_):
got_event[0] = got_event[0] + 1
reg_event_name = registered_event_name("foo")
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
sub = message_bus.create_subscription_to_push_by_type(reg_event_name, on_event)
self.assertIsNotNone(sub)
async def set_up_graph():
controller = og.Controller()
keys = og.Controller.Keys
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnClosing", "omni.graph.action.OnClosing"),
("Send", "omni.graph.action.SendCustomEvent"),
("GraphTarget", "omni.graph.nodes.GraphTarget"),
],
keys.CONNECT: [
("OnClosing.outputs:execOut", "Send.inputs:execIn"),
("GraphTarget.outputs:primPath", "Send.inputs:path"),
],
keys.SET_VALUES: [("Send.inputs:eventName", "foo")],
},
)
await set_up_graph()
# Evaluate once so that the standalone graph is in steady state.
await omni.kit.app.get_app().next_update_async()
self.assertEqual(got_event[0], 0)
# Close the stage.
usd_context = omni.usd.get_context()
(result, _) = await usd_context.close_stage_async()
self.assertTrue(result)
# Check our handler was called.
self.assertEqual(got_event[0], 1)
# Reset the counter.
got_event[0] = 0
# Now check that the same works with instanced graphs.
await usd_context.new_stage_async()
await set_up_graph()
og.cmds.SetEvaluationMode(
graph=og.get_graph_by_path(self.TEST_GRAPH_PATH),
new_evaluation_mode=og.GraphEvaluationMode.GRAPH_EVALUATION_MODE_INSTANCED,
)
stage = usd_context.get_stage()
prims = [stage.DefinePrim(f"/prim_{i}") for i in range(0, 100)]
for prim in prims:
OmniGraphSchemaTools.applyOmniGraphAPI(stage, prim.GetPath(), self.TEST_GRAPH_PATH)
# Wait an update for the graphs to get set up.
await omni.kit.app.get_app().next_update_async()
# Close the stage.
(result, _) = await usd_context.close_stage_async()
self.assertTrue(result)
# Check that our handler was called.
self.assertEqual(got_event[0], len(prims))
# ----------------------------------------------------------------------
async def test_oncustomevent_and_sendcustomevent_nodes(self):
"""Test OnCustomEvent and SendCustomEvent nodes"""
controller = og.Controller()
controller.create_graph({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
(_, (_, _, event1_node, counter1_node, event2_node, counter2_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Send", "omni.graph.action.SendCustomEvent"),
("OnCustomEvent1", "omni.graph.action.OnCustomEvent"),
("Counter1", "omni.graph.action.Counter"),
("OnCustomEvent2", "omni.graph.action.OnCustomEvent"),
("Counter2", "omni.graph.action.Counter"),
],
self.keys.CONNECT: [
("OnImpulse.outputs:execOut", "Send.inputs:execIn"),
("OnCustomEvent1.outputs:execOut", "Counter1.inputs:execIn"),
("OnCustomEvent2.outputs:execOut", "Counter2.inputs:execIn"),
],
self.keys.SET_VALUES: [
("OnCustomEvent1.inputs:onlyPlayback", False),
("OnCustomEvent2.inputs:onlyPlayback", False),
("OnImpulse.inputs:onlyPlayback", False),
("Send.inputs:eventName", "foo"),
("Send.inputs:path", "Test Path"),
("OnCustomEvent1.inputs:eventName", "foo"),
("OnCustomEvent2.inputs:eventName", "foo"),
],
},
)
counter1_controller = og.Controller(og.Controller.attribute("outputs:count", counter1_node))
counter2_controller = og.Controller(og.Controller.attribute("outputs:count", counter2_node))
event1_controller = og.Controller(og.Controller.attribute("outputs:path", event1_node))
event2_controller = og.Controller(og.Controller.attribute("outputs:path", event2_node))
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 0)
# Trigger graph once, this will queue up the event for the next evaluation.
controller.edit(self.TEST_GRAPH_PATH, {self.keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
# Note that if this is a push subscription, the receivers will run this frame instead of next.
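        # For reference, push vs. pop subscriptions on the carb message bus differ in when
        # callbacks fire (a sketch using the API from test_onclosing_node above):
        #   sub_push = bus.create_subscription_to_push_by_type(evt, fn)  # fires as the event is pushed (same frame)
        #   sub_pop = bus.create_subscription_to_pop_by_type(evt, fn)    # fires as the queue is pumped (next frame)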
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 0)
# This evaluation should trigger the receivers.
await omni.kit.app.get_app().next_update_async()
# Verify that events were received.
self.assertEqual(counter1_controller.get(), 1)
self.assertEqual(event1_controller.get(), "Test Path")
self.assertEqual(counter2_controller.get(), 1)
self.assertEqual(event2_controller.get(), "Test Path")
# Verify the contents of the associated bundle.
# FIXME: Authored bundle is always empty?
# bundle_contents = og.BundleContents(graph.get_default_graph_context(), event1_node, "outputs:bundle", True)
# self.assertEqual(1, bundle_contents.size)
# Modify the event name one receiver and sender and ensure it still works.
controller.edit(
self.TEST_GRAPH_PATH,
{
self.keys.SET_VALUES: [("Send.inputs:eventName", "bar"), ("OnImpulse.state:enableImpulse", True)],
},
)
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
# We changed the sender event name, so counter should NOT have triggered again.
self.assertEqual(counter1_controller.get(), 1)
# Change the receiver name to match.
controller.edit(self.TEST_GRAPH_PATH, {self.keys.SET_VALUES: ("OnCustomEvent1.inputs:eventName", "bar")})
await omni.kit.app.get_app().next_update_async()
# Trigger send again and verify we get it (1 frame lag for pop).
controller.edit(self.TEST_GRAPH_PATH, {self.keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(counter1_controller.get(), 2)
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_action_graph_evaluation_01.py
"""Action Graph Evaluation Tests, Part 1"""
import asyncio
import json
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.test
import omni.usd
# ======================================================================
class TestActionGraphEvaluation(ogts.OmniGraphTestCase):
"""Tests action graph evaluator functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ----------------------------------------------------------------------
async def test_active_latent(self):
"""Exercise a latent node that executes downstream nodes while latent"""
# +--------+ +----------+finished+-------------+
# | OnTick+-->| Countdown+-------->FinishCounter|
# +--------+ | | +-------------+
# | +-+
# +----------+ | +------------+ +------------+ +------------+
# +-----> TickCounter+----->TickCounter2+---->TickCounter3|
# tick +------------+ +------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Countdown", "omni.graph.action.Countdown"),
("FinishCounter", "omni.graph.action.Counter"),
("TickCounter", "omni.graph.action.Counter"),
("TickCounter2", "omni.graph.action.Counter"),
("TickCounter3", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Countdown.inputs:execIn"),
("Countdown.outputs:finished", "FinishCounter.inputs:execIn"),
("Countdown.outputs:tick", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickCounter2.inputs:execIn"),
("TickCounter2.outputs:execOut", "TickCounter3.inputs:execIn"),
],
keys.SET_VALUES: [("Countdown.inputs:duration", 3), ("OnTick.inputs:onlyPlayback", False)],
},
)
(_, _, finish_counter, tick_counter, _, tick_counter_3) = nodes
finish_counter_controller = og.Controller(og.Controller.attribute("outputs:count", finish_counter))
tick_counter_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter))
tick_counter_3_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter_3))
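        # Expected timeline with duration = 3: evaluation 1 puts Countdown into its latent
        # state; evaluations 2-4 emit "tick" pulses (propagating through the three chained
        # tick counters); evaluation 5 emits "finished".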
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
await controller.evaluate(graph)
self.assertEqual(tick_counter_controller.get(), 1)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_controller.get(), 2)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
# ----------------------------------------------------------------------
async def test_async_nodes(self):
"""Test asynchronous action nodes"""
# Check that a nested loop state is maintained when executing a latent delay.
#
# +---------+ +----------+ +----------+ +-------+ +--------+
# | IMPULSE +-->| FOR-LOOP +--->| FOR-LOOP +--->| DELAY +--->| COUNTER|
# +---------+ +----------+ +----------+ +-------+ +--------+
#
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, _, counter_node, _, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Delay", "omni.graph.action.Delay"),
("Counter", "omni.graph.action.Counter"),
("OnTick", "omni.graph.action.OnTick"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Delay.inputs:execIn"),
("Delay.outputs:finished", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("OnImpulse.inputs:onlyPlayback", False),
("Delay.inputs:duration", 0.1),
("ForLoop1.inputs:stop", 2),
("ForLoop2.inputs:stop", 5),
],
},
)
await controller.evaluate(graph)
# Trigger graph once.
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# In delay now, no count.
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
self.assertEqual(counter_controller.get(), 0)
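        # 2 outer iterations x 5 inner iterations queue 10 delayed executions in total,
        # which is why the counter is expected to reach exactly 10 once all delays finish.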
# Wait to ensure the first 5 delays compute.
for _ in range(5):
await asyncio.sleep(0.2)
await controller.evaluate(graph)
count_val = counter_controller.get()
self.assertGreater(count_val, 4)
# Wait and verify the remainder go through.
for _ in range(5):
await asyncio.sleep(0.1)
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 10)
# ----------------------------------------------------------------------
async def test_chained_stateful_nodes(self):
"""Test that chaining loop nodes works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("ForLoop1.inputs:stop", 5),
("ForLoop2.inputs:stop", 5),
],
},
)
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_node)), 5 * 5)
# ----------------------------------------------------------------------
async def test_cycle_break(self):
"""Test that an illegal cycle issues a warning"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (on_impulse, count_a, count_b), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("A", "omni.graph.action.Counter"),
("B", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "A.inputs:execIn"),
("A.outputs:execOut", "B.inputs:execIn"),
("B.outputs:execOut", "A.inputs:execIn"),
],
keys.SET_VALUES: [
("OnImpulse.state:enableImpulse", True),
("OnImpulse.inputs:onlyPlayback", False),
],
},
)
with ogts.ExpectedError():
await controller.evaluate(graph)
og.Controller.set(controller.attribute("state:enableImpulse", on_impulse), True)
with ogts.ExpectedError():
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", count_a)), 2)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", count_b)), 2)
# ----------------------------------------------------------------------
async def test_dep_sort_fan_out(self):
"""Test that dependency sort works when there is data fan-out"""
# +-------------+
# +-------->| |
# | | SwitchTokenA|
# | +--->+-------------+
# +----------+ |
# |OnImpulse +----|------+ +--------------+
# +----------+ | +---------->| SwitchTokenB |
# | +^-------------+
# +------+-+ +--------+ |
# | ConstA +--->AppendB +---+
# +--------+ +--------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, _, _), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ConstA", "omni.graph.nodes.ConstantToken"),
("AppendB", "omni.graph.nodes.AppendString"),
("SwitchTokenA", "omni.graph.action.SwitchToken"),
("SwitchTokenB", "omni.graph.action.SwitchToken"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "SwitchTokenA.inputs:execIn"),
("OnImpulse.outputs:execOut", "SwitchTokenB.inputs:execIn"),
("ConstA.inputs:value", "SwitchTokenA.inputs:value"),
("ConstA.inputs:value", "AppendB.inputs:value"),
("AppendB.outputs:value", "SwitchTokenB.inputs:value"),
],
keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
("AppendB.inputs:suffix", {"value": "Foo", "type": "token"}),
],
},
)
await controller.evaluate(graph)
graph_state = og.OmniGraphInspector().as_json(graph, flags=["evaluation"])
graph_state_obj = json.loads(graph_state)
trace = graph_state_obj["Evaluator"]["Instances"][0]["LastNonEmptyEvaluation"]["Trace"]
        # The switches can run in either order, but AppendB must be evaluated immediately
        # before SwitchTokenB to satisfy SwitchTokenB's data dependency.
self.assertTrue(
trace in (["SwitchTokenA", "AppendB", "SwitchTokenB"], ["AppendB", "SwitchTokenB", "SwitchTokenA"])
)
# ----------------------------------------------------------------------
async def test_diamond_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a downstream node"""
# +--------++ +----------+
# +--> TickA +--->|FinishedA |---+
# | +---------+ +----------+ |
# +---------+ +-----------+ | | +------------+
# |OnImpulse+-->|TickCounter+-+ +-->|MergeCounter|
# +---------+ +-----------+ | | +------------+
# | +---------+ +----------+ |
# +-->| TickB +--->|FinishedB |--+
# +--------++ +----------+
# | +---------+
# +-->| TickC |
# +--------++
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickCounter", "omni.graph.action.Counter"),
("TickA", "omni.graph.action.Countdown"),
("TickB", "omni.graph.action.Countdown"),
("TickC", "omni.graph.action.Countdown"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("MergeCounter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickA.inputs:execIn"),
("TickCounter.outputs:execOut", "TickB.inputs:execIn"),
("TickCounter.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
("FinishCounterA.outputs:execOut", "MergeCounter.inputs:execIn"),
("FinishCounterB.outputs:execOut", "MergeCounter.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_counter, _, _, tick_c, finish_counter_a, finish_counter_b, merge_counter) = nodes
def check_counts(t_c, f_a, f_b, m_c, tick_c_count):
for node, expected in (
(tick_counter, t_c),
(finish_counter_a, f_a),
(finish_counter_b, f_b),
(merge_counter, m_c),
):
count = og.Controller.get(controller.attribute("outputs:count", node))
self.assertEqual(count, expected, node.get_prim_path())
self.assertEqual(tick_c.get_compute_count(), tick_c_count)
self.assertEqual(tick_c.get_compute_count(), 0)
# Set up latent tickers.
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 1)
# Latent ticks.
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 2)
# Both branches complete.
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# No count changes + no additional computes of tickC.
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# ----------------------------------------------------------------------
async def test_diamond_latent_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a latent downstream node"""
# +--------++
# +--> TickA +--+
# | +---------+ |
# +---------+ | | +-------+ +-------+
# |OnImpulse+-->+ +-->|TickD +-+--->|CountF |
# +---------+ | | +-------+ | +-------+
# | +--------+ | +--->+-------+
# +-->| TickB +--+ |TickE |
# | +--------+ +--->+-------+
# | +--------+ |
# +-->| TickC +----------------+
# +--------+
        # Note that after TickA triggers TickD into its latent state, TickB subsequently hits TickD as well.
        # That subsequent evaluation is _transient_, meaning TickB will not block on a new copy of TickD:
        # there is only one TickD, so there can be only one state (latent or not).
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.Countdown"),
("TickB", "omni.graph.action.Countdown"),
("TickC", "omni.graph.action.Countdown"),
("TickD", "omni.graph.action.Countdown"),
("TickE", "omni.graph.action.Countdown"),
("CountF", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickB.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "TickD.inputs:execIn"),
("TickB.outputs:finished", "TickD.inputs:execIn"),
("TickC.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "CountF.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 2),
("TickD.inputs:duration", 1),
("TickE.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_a, tick_b, tick_c, tick_d, tick_e, count_f) = nodes
def check_counts(i, t_a, t_b, t_c, t_d, t_e):
for node, expected in ((tick_a, t_a), (tick_b, t_b), (tick_c, t_c), (tick_d, t_d), (tick_e, t_e)):
self.assertEqual(node.get_compute_count(), expected, f"Check {i} for {node.get_prim_path()}")
# A, B, C, D, E
compute_counts = [
(1, 1, 1, 0, 0), # 0. fan out to trigger A, B, C into latent state
(2, 2, 2, 0, 0), # 1. A, B, C tick
(3, 3, 3, 2, 0), # 2. A, B end latent, D into latent via A or B, D ticks via A or B, C ticks
(3, 3, 4, 3, 2), # 3.
(3, 3, 4, 3, 3), # 4.
(3, 3, 4, 3, 3), # 5.
(3, 3, 4, 3, 3), # 6.
]
for i, c_c in enumerate(compute_counts):
await controller.evaluate(graph)
check_counts(i, *c_c)
# Verify that CountF has computed 1x due to the fan-in at TickD NOT acting like separate threads.
self.assertEqual(count_f.get_compute_count(), 1)
# ----------------------------------------------------------------------
async def test_dynamic_exec_pins(self):
"""Test that adding execution pins to a non-action node works"""
controller = og.Controller()
keys = og.Controller.Keys
(_, (on_tick, to_string), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("ToString", "omni.graph.nodes.ToString"),
],
keys.SET_VALUES: [
("ToString.inputs:value", 42, "double"),
("OnTick.inputs:onlyPlayback", False),
],
},
)
# Verify to_string has not been computed.
await controller.evaluate()
self.assertEqual(0, to_string.get_compute_count())
self.assertEqual(1, on_tick.get_compute_count())
# Add execution attribs and verify it still doesn't get computed.
attrib = og.Controller.create_attribute(
to_string,
"inputs:execIn",
"execution",
og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT,
)
self.assertIsNotNone(attrib)
await controller.evaluate()
self.assertEqual(0, to_string.get_compute_count())
self.assertEqual(2, on_tick.get_compute_count())
# Hook up to OnTick and verify it is now computing.
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CONNECT: [
("OnTick.outputs:tick", "ToString.inputs:execIn"),
]
},
)
for i in range(10):
await controller.evaluate()
self.assertEqual(i + 1, to_string.get_compute_count())
self.assertEqual(i + 3, on_tick.get_compute_count())
# ----------------------------------------------------------------------
async def test_exec_fan_out(self):
"""Test that fanning out from an exec port works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FF1", "omni.graph.action.FlipFlop"),
("FF2", "omni.graph.action.FlipFlop"),
("FF11", "omni.graph.action.FlipFlop"),
("FF12", "omni.graph.action.FlipFlop"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
keys.CONNECT: [
("OnTick.outputs:tick", "FF1.inputs:execIn"),
("OnTick.outputs:tick", "FF2.inputs:execIn"),
("FF1.outputs:a", "FF11.inputs:execIn"),
("FF1.outputs:a", "FF12.inputs:execIn"),
],
},
)
# 1. OnTick triggers FF1 which triggers FF11 and FF12, then FF2.
# 2. OnTick triggers FF1 and FF2.
# 3. OnTick triggers FF1 which triggers FF11 and FF12, then FF2.
await controller.evaluate(graph)
flip_flops = nodes[1:]
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [False, False, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, False, False])
# ----------------------------------------------------------------------
async def test_exec_fan_out_shared_deps(self):
"""Test that dependency sort works when there is shared data in exec fan-out"""
# +---------+
# +---------->| Write1 |
# | +----^----+
# | |
# | +----------+
# | |
# +-----------+ | |
# | OnImpulse +-----+-----+----> +---------+
# +-----------+ | | | Write2 |
# | +----->+---------+
# | |
# | | +---------+
# +-----+----->| Write3 |
# | +---------+
# | ^
# +-------+ +---+----+---+
# | Const +----->| Inc |
# +-------+ +--------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, _, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Const", "omni.graph.nodes.ConstantDouble"),
("Inc", "omni.graph.nodes.Increment"),
("Write1", "omni.graph.nodes.WritePrimAttribute"),
("Write2", "omni.graph.nodes.WritePrimAttribute"),
("Write3", "omni.graph.nodes.WritePrimAttribute"),
],
keys.CREATE_PRIMS: [
("/World/TestPrim1", {"val": ("double", 1.0)}),
("/World/TestPrim2", {"val": ("double", 2.0)}),
("/World/TestPrim3", {"val": ("double", 3.0)}),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "Write1.inputs:execIn"),
("OnImpulse.outputs:execOut", "Write2.inputs:execIn"),
("OnImpulse.outputs:execOut", "Write3.inputs:execIn"),
("Const.inputs:value", "Inc.inputs:value"),
("Inc.outputs:result", "Write1.inputs:value"),
("Inc.outputs:result", "Write2.inputs:value"),
("Inc.outputs:result", "Write3.inputs:value"),
],
keys.SET_VALUES: [
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
("Const.inputs:value", 41.0),
("Inc.inputs:increment", 1.0),
("Write1.inputs:primPath", "/World/TestPrim1"),
("Write1.inputs:usePath", True),
("Write1.inputs:name", "val"),
("Write2.inputs:primPath", "/World/TestPrim2"),
("Write2.inputs:usePath", True),
("Write2.inputs:name", "val"),
("Write3.inputs:primPath", "/World/TestPrim3"),
("Write3.inputs:usePath", True),
("Write3.inputs:name", "val"),
],
},
)
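        # The shared Const -> Inc dependency should be evaluated once, giving
        # 41.0 + 1.0 = 42.0, and every Write node in the exec fan-out should
        # receive that same value.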
await controller.evaluate(graph)
stage = omni.usd.get_context().get_stage()
for i in (1, 2, 3):
self.assertEqual(stage.GetAttributeAtPath(f"/World/TestPrim{i}.val").Get(), 42.0)
# ----------------------------------------------------------------------
async def test_exec_sort_failure(self):
"""Test that sorting dependencies with non-trivial authored graph"""
        # Our global sort excludes exec nodes, so a global topological (Kahn) sort would fail here, leaving Inc3
        # uncomputed until after Add2. Instead we sort each dependency network independently; this test verifies a
        # case where that matters.
#
# +-----------------------------> Write1(var) +----------------------------------------+
# | ^ | |
# | | | v
# OnTick --------------------+ | +-----------Inc------------+ Write2(var2)
# | | | ^
# v | | |
# Read1(var)------------> Add1 --Inc2--+ v |
# Inc3 --------------> Add2 ---------------+
controller = og.Controller()
keys = og.Controller.Keys
(_, (on_tick, a_1, a_2, _, _, _, _, _, _), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("A1", "omni.graph.nodes.Add"),
("A2", "omni.graph.nodes.Add"),
("Write1", "omni.graph.core.WriteVariable"),
("Write2", "omni.graph.core.WriteVariable"),
("Read1", "omni.graph.core.ReadVariable"),
("Inc", "omni.graph.nodes.Increment"),
("Inc2", "omni.graph.nodes.Increment"),
("Inc3", "omni.graph.nodes.Increment"),
],
keys.CREATE_VARIABLES: [
("var", og.Type(og.BaseDataType.DOUBLE)),
("var2", og.Type(og.BaseDataType.DOUBLE)),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Inc3.inputs:value", {"type": "double", "value": 42.0}),
("Write1.inputs:variableName", "var"),
("Write2.inputs:variableName", "var2"),
("Read1.inputs:variableName", "var"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Write1.inputs:execIn"),
("OnTick.outputs:timeSinceStart", "A1.inputs:a"),
("Read1.outputs:value", "A1.inputs:b"),
("A1.outputs:sum", "Inc2.inputs:value"),
("Inc2.outputs:result", "Write1.inputs:value"),
("Write1.outputs:execOut", "Write2.inputs:execIn"),
("Write1.outputs:value", "Inc.inputs:value"),
("Inc.outputs:result", "A2.inputs:a"),
("Inc3.outputs:result", "A2.inputs:b"),
("A2.outputs:sum", "Write2.inputs:value"),
],
},
)
await omni.kit.app.get_app().next_update_async()
a_1_v = og.Controller.get(controller.attribute("outputs:sum", a_1))
a_2_v = og.Controller.get(controller.attribute("outputs:sum", a_2))
on_tick_dt = og.Controller.get(controller.attribute("outputs:timeSinceStart", on_tick))
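        # Expected values, derived from the wiring above: A1 = timeSinceStart + var
        # (var is initially 0); Write1 stores Inc2(A1) = A1 + 1; and
        # A2 = Inc(Write1 value) + Inc3(42) = (A1 + 1 + 1) + (42 + 1).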
a_1_expected = 0 + on_tick_dt
a_2_expected = (a_1_expected + 1.0 + 1.0) + (42.0 + 1.0)
self.assertAlmostEqual(a_1_v, a_1_expected, places=3)
self.assertAlmostEqual(a_2_v, a_2_expected, places=3)
# ----------------------------------------------------------------------
async def test_fan_in(self):
"""Test that fan-in of execution connections works as expected (from a loaded test .usda file)"""
(result, error) = await ogts.load_test_file("TestActionFanIn.usda", use_caller_subdirectory=True)
self.assertTrue(result, error)
graph_path = "/World/ActionGraph"
controller = og.Controller()
graph = controller.graph(graph_path)
# Trigger the loop.
og.Controller.set(controller.attribute(f"{graph_path}/on_impulse_event.state:enableImpulse"), True)
await controller.evaluate(graph)
graph_state = og.OmniGraphInspector().as_json(controller.graph(graph_path), flags=["evaluation"])
graph_state_obj = json.loads(graph_state)
trace = graph_state_obj["Evaluator"]["Instances"][0]["LastNonEmptyEvaluation"]["Trace"]
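        # The evaluation trace records node names in compute order, so we can
        # assert the fan-in / sync-gate ordering for each loop iteration.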
# Verify the first loop iteration.
self.assertEqual("for_loop", trace[0])
# These nodes can compute in any order
self.assertEqual(["counter", "counter_01"], sorted(trace[1:3]))
expected_trace = [
"to_uint64",
"sync_gate",
"to_uint64",
"sync_gate",
]
self.assertListEqual(expected_trace, trace[3:7])
trace[0:7] = []
# Verify downstream from sync gate.
expected_trace = [
"counter_02",
]
self.assertListEqual(expected_trace, trace[0:1])
trace[0 : len(expected_trace)] = []
# Verify second iteration.
self.assertEqual("for_loop", trace[0])
# These nodes can compute in any order
self.assertEqual(["counter", "counter_01"], sorted(trace[1:3]))
expected_trace = [
"to_uint64",
"sync_gate",
"to_uint64",
"sync_gate",
]
self.assertListEqual(expected_trace, trace[3:7])
# ----------------------------------------------------------------------
async def test_loading_type_resol(self):
"""Test that loading a file with weird type resolution pattern works"""
(result, error) = await ogts.load_test_file("load_with_type_resol.usda", use_caller_subdirectory=True)
self.assertTrue(result, error)
graph_path = "/World/ActionGraph"
controller = og.Controller()
graph = controller.graph(graph_path)
# eval
await controller.evaluate(graph)
# check result
var = graph.find_variable("Result")
val = var.get_array(graph.get_default_graph_context(), False, 0)
self.assertTrue((val == [(-50, -50, -50), (50, 50, 50)]).all())
# ----------------------------------------------------------------------
async def test_fan_in_exec(self):
"""Test that execution fan-in is handled correctly"""
        # The evaluator has to consider the case where gate.enter has contradicting upstream values.
        # Gate needs to know which input is active: it needs the value of enter to be ENABLED when it
        # is triggered by OnTick, even though OnTickDisabled has set its output to the same attrib as DISABLED.
#
# +--------------+
# |OnImpulseEvent+---+ +-----------+
# +--------------+ | |Gate |
# +--->toggle |
# +--------------+ | |
# |OnTick +------>|enter |
# +--------------+ +^-----exit-+
# |
# +--------------+ |
# |OnTickDisabled+--------+
# +--------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, gate, on_impulse_event), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("OnTickDisabled", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Gate.inputs:enter"),
("OnTickDisabled.outputs:tick", "Gate.inputs:enter"),
("OnImpulseEvent.outputs:execOut", "Gate.inputs:toggle"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", True),
("OnTick.inputs:onlyPlayback", False),
("OnImpulseEvent.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has not triggered.
self.assertFalse(gate_exit.get())
await controller.evaluate(graph)
self.assertFalse(gate_exit.get())
        # Toggle the gate and verify that the tick goes through. On the first evaluation it isn't known whether the
        # Gate will trigger, because the order in which entry points are executed is not defined... FIXME.
controller.attribute("state:enableImpulse", on_impulse_event).set(True)
await controller.evaluate(graph)
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_fan_out_exec(self):
"""Test that execution fan-out is handled correctly"""
# We want to reset the execution attribute states before the node compute() to avoid bugs
# that arise when authors forget to fully specify the output states. However we can't
# do this in the middle of traversal, because fan-out from a connection requires that the state
# be preserved for every downstream node which may read from it (like Gate).
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, gate, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Gate.inputs:enter"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", False),
("OnTick.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has triggered.
self.assertTrue(gate_exit.get())
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_latent_and_push(self):
"""Exercise latent nodes in combination with stateful loop node"""
#
# +---------+ +-------+ tick +--------+ loopBody +-------+ +------------+
# |OnImpulse+-->|TickA +----------->ForLoop1++--------->|TickB +-+->|TickCounterB|
# +---------+ +----+--+ +--------++ +-------+ | +------------+
# | finish | |
# | | |
# | +--------------+ +v----------------+ +-v------------+
# +----->|FinishCounterA| |FinishLoopCounter| |FinishCounterB|
# +--------------+ +-----------------+ +--------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.Countdown"),
("ForLoop1", "omni.graph.action.ForLoop"),
("TickB", "omni.graph.action.Countdown"),
("FinishLoopCounter", "omni.graph.action.Counter"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "ForLoop1.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("ForLoop1.outputs:loopBody", "TickB.inputs:execIn"),
("ForLoop1.outputs:finished", "FinishLoopCounter.inputs:execIn"),
("TickB.outputs:tick", "TickCounterB.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
],
keys.SET_VALUES: [
("ForLoop1.inputs:start", 0),
("ForLoop1.inputs:stop", 3),
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("OnImpulse.state:enableImpulse", True),
("OnImpulse.inputs:onlyPlayback", False),
],
},
)
(_, _, _, _, finish_loop_counter, finish_counter_a, finish_counter_b, tick_counter_b) = nodes
for _ in range(20):
await controller.evaluate(graph)
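        # TickA (duration 2) ticks twice, each tick running the 3-iteration ForLoop,
        # so TickB is activated 2 * 3 = 6 times. Each TickB (duration 2) ticks twice:
        # 12 body ticks, 6 TickB finishes, 2 loop finishes and 1 TickA finish.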
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 12)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_b)), 6)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_loop_counter)), 2)
# ----------------------------------------------------------------------
async def test_latent_chain(self):
"""Exercise a chain of latent nodes"""
# +---------+ +-------+ tick +-------+ tick +-------+
# |OnImpulse+-->TickA +-------->TickB +-------->|LatentC|
# +---------+ +-----+-+ +------++ +-------+-----+
# | finish | finish |
# finish | +-------------+ | +-------------+ +-v----------+
# +->TickCounterA | +-->| TickCounterB| |TickCounterC|
# +-------------+ +-------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.Countdown"),
("TickB", "omni.graph.action.Countdown"),
("LatentC", "omni.graph.action.Countdown"),
("TickCounterA", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
("TickCounterC", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "TickB.inputs:execIn"),
("TickA.outputs:finished", "TickCounterA.inputs:execIn"),
("TickB.outputs:tick", "LatentC.inputs:execIn"),
("TickB.outputs:finished", "TickCounterB.inputs:execIn"),
("LatentC.outputs:finished", "TickCounterC.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("LatentC.inputs:duration", 2),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, _, _, _, tick_counter_a, tick_counter_b, tick_counter_c) = nodes
for _ in range(16):
await controller.evaluate(graph)
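        # TickA (duration 2) ticks twice, activating TickB twice; each TickB ticks
        # twice, activating LatentC four times. Hence the finish counts: A=1, B=2, C=4.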
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 2)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_c)), 4)
| 44,222 | Python | 47.225736 | 116 | 0.453349 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_on_stage_event_node.py | """Basic tests of the OnStageEvent node"""
import omni.client
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.graph.tools.ogn as ogn
import omni.kit.app
import omni.kit.test
import omni.usd
from pxr import Sdf
# ======================================================================
class TestOnStageEventNode(ogts.OmniGraphTestCase):
"""Tests OnStageEvent node functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
async def test_backward_compatibility_v3(self):
"""Validate backward compatibility for legacy versions of OnStageEvent node."""
# load the test scene which contains a OnStageEvent V2 node
(result, error) = await ogts.load_test_file("TestOnStageEventNode_v2.usda", use_caller_subdirectory=True)
self.assertTrue(result, error)
action_graph_path = "/World/ActionGraph"
action_graph = og.get_graph_by_path(action_graph_path)
on_stage_event_node = action_graph.get_node(action_graph_path + "/on_stage_event")
self.assertTrue(on_stage_event_node.is_valid())
# The "Hierarchy Changed" event has been introduced since V3. Validate that it is
# automatically included by the list of allowed tokens after loading V2.
attr = on_stage_event_node.get_attribute("inputs:eventName")
allowed_tokens = attr.get_metadata(ogn.MetadataKeys.ALLOWED_TOKENS)
self.assertTrue(isinstance(allowed_tokens, str))
self.assertTrue("Hierarchy Changed" in allowed_tokens.split(","))
async def test_stage_events(self):
"""Test OnStageEvent"""
controller = og.Controller()
keys = og.Controller.Keys
(_, (on_stage_node, _, _, counter_sel_node, counter_stop_node, counter_start_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnStageEvent", "omni.graph.action.OnStageEvent"),
("OnStageEvent2", "omni.graph.action.OnStageEvent"),
("OnStageEvent3", "omni.graph.action.OnStageEvent"),
("Counter", "omni.graph.action.Counter"),
("Counter2", "omni.graph.action.Counter"),
("Counter3", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnStageEvent.outputs:execOut", "Counter.inputs:execIn"),
("OnStageEvent2.outputs:execOut", "Counter2.inputs:execIn"),
("OnStageEvent3.outputs:execOut", "Counter3.inputs:execIn"),
],
keys.SET_VALUES: [
("OnStageEvent.inputs:eventName", "Selection Changed"),
("OnStageEvent.inputs:onlyPlayback", False),
("OnStageEvent2.inputs:eventName", "Animation Stop Play"),
("OnStageEvent2.inputs:onlyPlayback", True),
("OnStageEvent3.inputs:eventName", "Animation Start Play"),
("OnStageEvent3.inputs:onlyPlayback", True),
],
},
)
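        # Stage events take one frame to be popped from the event queue and one
        # more for the downstream compute, hence this two-frame wait helper.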
async def wait_2():
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
def get_start_count():
return og.Controller.get(controller.attribute("outputs:count", counter_start_node))
def get_stop_count():
return og.Controller.get(controller.attribute("outputs:count", counter_stop_node))
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 0)
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 0)
selection = omni.usd.get_context().get_selection()
selection.set_selected_prim_paths([self.TEST_GRAPH_PATH + "/OnStageEvent"], False)
# 1 frame delay on the pop, 1 frame delay on the compute
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 1)
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 0)
# change the tracked event, verify selection doesn't fire
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Saved")
selection.set_selected_prim_paths([self.TEST_GRAPH_PATH + "/OnStageEvent2"], False)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 1)
await omni.kit.app.get_app().next_update_async()
# change it back, verify it does fire when selection changes again
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Selection Changed")
await omni.kit.app.get_app().next_update_async()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 1)
selection.set_selected_prim_paths([self.TEST_GRAPH_PATH + "/OnStageEvent"], False)
await wait_2()
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_sel_node)), 2)
# Verify that start/stop events work when only-playback is true
timeline = omni.timeline.get_timeline_interface()
timeline.set_start_time(1.0)
timeline.set_end_time(10.0)
timeline.set_target_framerate(timeline.get_time_codes_per_seconds())
timeline.play()
await wait_2()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
await omni.kit.app.get_app().next_update_async()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
# Check that pausing / resuming does not trigger
timeline.pause()
await wait_2()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
timeline.play()
await wait_2()
self.assertEqual(get_stop_count(), 0)
self.assertEqual(get_start_count(), 1)
timeline.stop()
await wait_2()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 1)
await controller.evaluate()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 1)
# Verify that stopping while paused triggers the event
timeline.play()
await wait_2()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 2)
timeline.pause()
await wait_2()
self.assertEqual(get_stop_count(), 1)
self.assertEqual(get_start_count(), 2)
timeline.stop()
await wait_2()
self.assertEqual(get_stop_count(), 2)
self.assertEqual(get_start_count(), 2)
# ----------------------------------------------------------------------
async def test_stage_hierarchy_changed_event(self):
"""Test the Hierarchy Changed event"""
app = omni.kit.app.get_app()
controller = og.Controller()
keys = og.Controller.Keys
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
root_path = Sdf.Path.absoluteRootPath
# Create Xform
omni.kit.commands.execute("CreatePrim", prim_type="Xform")
xform_path = root_path.AppendChild("Xform")
xform = stage.GetPrimAtPath(xform_path)
self.assertTrue(xform)
# Create Material
omni.kit.commands.execute("CreatePrim", prim_type="Material")
material_path = root_path.AppendChild("Material")
material = stage.GetPrimAtPath(material_path)
self.assertTrue(material)
# Create action graph
(_, (on_stage_node, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnStageEvent", "omni.graph.action.OnStageEvent"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnStageEvent.outputs:execOut", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("OnStageEvent.inputs:eventName", "Hierarchy Changed"),
("OnStageEvent.inputs:onlyPlayback", False),
],
},
)
outputs_count_attr = controller.attribute("outputs:count", counter_node)
expected_hierarchy_changed_event_count = 0
await app.next_update_async()
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Create cube
omni.kit.commands.execute("CreatePrim", prim_type="Cube")
cube_path = root_path.AppendChild("Cube")
cube = stage.GetPrimAtPath(cube_path)
self.assertTrue(cube)
# 1 frame delay on the pop, 1 frame delay on the compute
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Reparent cube
cube_path_reparented = xform_path.AppendChild("Cube")
omni.kit.commands.execute("MovePrim", path_from=cube_path, path_to=cube_path_reparented)
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Rename cube to lowercase
cube_path_lowercase = xform_path.AppendChild("cube")
omni.kit.commands.execute("MovePrim", path_from=cube_path_reparented, path_to=cube_path_lowercase)
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Modify size attribute.
cube = stage.GetPrimAtPath(cube_path_lowercase)
self.assertTrue(cube)
cube.GetAttribute("size").Set(1.0)
await app.next_update_async()
await app.next_update_async()
# The "Hierarchy Changed" event is not expected for attribute change.
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Modify material binding.
rel = cube.CreateRelationship("material:binding", False)
rel.SetTargets([material_path])
await app.next_update_async()
await app.next_update_async()
# The "Hierarchy Changed" event is not expected for relationship change.
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Change the tracked event
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Saved")
await og.Controller.evaluate()
omni.kit.commands.execute("MovePrim", path_from=cube_path_lowercase, path_to=cube_path)
await app.next_update_async()
await app.next_update_async()
# verify hierarchy changed event doesn't fire
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
###########################################################
# Change it back, verify it does fire when hierarchy changes again
og.Controller.set(controller.attribute("inputs:eventName", on_stage_node), "Hierarchy Changed")
await og.Controller.evaluate()
# Remove cube
omni.kit.commands.execute("DeletePrims", paths=[cube_path])
await app.next_update_async()
await app.next_update_async()
expected_hierarchy_changed_event_count += 1
self.assertEqual(og.Controller.get(outputs_count_attr), expected_hierarchy_changed_event_count)
| 12,775 | Python | 42.016835 | 117 | 0.599687 |
omniverse-code/kit/exts/omni.graph.action/omni/graph/action/tests/test_evaluation.py | """Action Graph Evaluation Tests"""
import asyncio
import carb.events
import omni.client
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.test
import omni.usd
# ======================================================================
class TestActionGraphEvaluation(ogts.OmniGraphTestCase):
"""Tests action graph evaluator functionality"""
TEST_GRAPH_PATH = "/World/TestGraph"
async def setUp(self):
"""Set up test environment, to be torn down when done"""
await super().setUp()
og.Controller.edit({"graph_path": self.TEST_GRAPH_PATH, "evaluator_name": "execution"})
# ----------------------------------------------------------------------
async def test_exec_fan_out(self):
"""Test that fanning out from an exec port works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("FF1", "omni.graph.action.FlipFlop"),
("FF2", "omni.graph.action.FlipFlop"),
("FF11", "omni.graph.action.FlipFlop"),
("FF12", "omni.graph.action.FlipFlop"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
],
keys.CONNECT: [
("OnTick.outputs:tick", "FF1.inputs:execIn"),
("OnTick.outputs:tick", "FF2.inputs:execIn"),
("FF1.outputs:a", "FF11.inputs:execIn"),
("FF1.outputs:a", "FF12.inputs:execIn"),
],
},
)
# 1. OnTick triggers FF1 which triggers FF11 and FF12, then FF2
# 2. OnTick triggers FF1 and FF2
# 3. OnTick triggers FF1 which triggers FF11 and FF12, then FF2
await controller.evaluate(graph)
flip_flops = nodes[1:]
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [False, False, True, True])
await controller.evaluate(graph)
ff_state = [og.Controller.get(controller.attribute("outputs:isA", node)) for node in flip_flops]
self.assertEqual(ff_state, [True, True, False, False])
# ----------------------------------------------------------------------
async def test_chained_stateful_nodes(self):
"""Test that chaining loop nodes works"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Counter.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("ForLoop1.inputs:stop", 5),
("ForLoop2.inputs:stop", 5),
],
},
)
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter_node)), 5 * 5)
# ----------------------------------------------------------------------
async def test_async_nodes(self):
"""Test asynchronous action nodes"""
# Check that a nested loop state is maintained when executing a latent delay
#
# +---------+ +----------+ +----------+ +-------+ +--------+
# | IMPULSE +-->| FOR-LOOP +--->| FOR-LOOP +--->| DELAY +--->| COUNTER|
# +---------+ +----------+ +----------+ +-------+ +--------+
#
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, _, _, counter_node, _, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("ForLoop1", "omni.graph.action.ForLoop"),
("ForLoop2", "omni.graph.action.ForLoop"),
("Delay", "omni.graph.action.Delay"),
("Counter", "omni.graph.action.Counter"),
("OnTick", "omni.graph.action.OnTick"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:loopBody", "ForLoop2.inputs:execIn"),
("ForLoop2.outputs:loopBody", "Delay.inputs:execIn"),
("Delay.outputs:finished", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("OnImpulse.inputs:onlyPlayback", False),
("Delay.inputs:duration", 0.1),
("ForLoop1.inputs:stop", 2),
("ForLoop2.inputs:stop", 5),
],
},
)
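        # ForLoop1 (stop=2) nests ForLoop2 (stop=5), so the Delay node is reached
        # 2 * 5 = 10 times; Counter only increments after each 0.1 s latent delay.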
await controller.evaluate(graph)
# trigger graph once
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# in delay now, no count
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
self.assertEqual(counter_controller.get(), 0)
# wait to ensure the first 5 delays compute
for _ in range(5):
await asyncio.sleep(0.2)
await controller.evaluate(graph)
count_val = counter_controller.get()
self.assertGreater(count_val, 4)
# wait and verify the remainder go through
for _ in range(5):
await asyncio.sleep(0.1)
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 10)
# ----------------------------------------------------------------------
async def test_stateful_flowcontrol_evaluation(self):
"""Test that stateful flow control nodes are fully evaluated"""
# b
# +----------+ +---------+
# +--->| Sequence +-->|Counter1 |
# | +----------+ +---------+
# +-----------+ |
# | OnImpulse +-+
# +-----------+ |
# | +----------+ +----------+
# +--->| ForLoop1 +-->| Counter2 |
# +----------+ +----------+
# finished
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, counter1_node, _, counter2_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Sequence", "omni.graph.action.Sequence"),
("Counter1", "omni.graph.action.Counter"),
("ForLoop1", "omni.graph.action.ForLoop"),
("Counter2", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "Sequence.inputs:execIn"),
("Sequence.outputs:b", "Counter1.inputs:execIn"),
("OnImpulse.outputs:execOut", "ForLoop1.inputs:execIn"),
("ForLoop1.outputs:finished", "Counter2.inputs:execIn"),
],
keys.SET_VALUES: [("OnImpulse.inputs:onlyPlayback", False), ("ForLoop1.inputs:stop", 10)],
},
)
await controller.evaluate(graph)
# trigger graph once
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
# verify that counter was called in spite of sequence 'a' being disconnected
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter1_node)), 1)
        # verify that counter was called in spite of there being no loopBody connection - the execution
        # evaluator still has to trigger the loop 11 times even though nothing is connected downstream
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", counter2_node)), 1)
# ----------------------------------------------------------------------
async def test_request_driven_node(self):
"""Test that RequestDriven nodes are computed as expected"""
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, counter_node), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("Counter", "omni.graph.action.Counter"),
],
keys.SET_VALUES: [("OnImpulse.inputs:onlyPlayback", False)],
keys.CONNECT: ("OnImpulse.outputs:execOut", "Counter.inputs:execIn"),
},
)
# After several updates, there should have been no compute calls
await controller.evaluate(graph)
await controller.evaluate(graph)
await controller.evaluate(graph)
counter_controller = og.Controller(og.Controller.attribute("outputs:count", counter_node))
self.assertEqual(counter_controller.get(), 0)
# change OnImpulse state attrib. The node should now request compute
controller.edit(self.TEST_GRAPH_PATH, {keys.SET_VALUES: ("OnImpulse.state:enableImpulse", True)})
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 1)
# more updates should not result in more computes
await controller.evaluate(graph)
await controller.evaluate(graph)
await controller.evaluate(graph)
self.assertEqual(counter_controller.get(), 1)
# ----------------------------------------------------------------------
async def test_fan_in_exec(self):
"""Test that execution fan-in is handled correctly."""
        # The evaluator has to consider the case where gate.enter has contradicting upstream values.
        # Gate needs to know which input is active: it needs the value of enter to be ENABLED when it
        # is triggered by OnTick, even though OnTickDisabled has set its output to the same attrib as DISABLED.
#
# +--------------+
# |OnImpulseEvent+---+ +-----------+
# +--------------+ | |Gate |
# +--->toggle |
# +--------------+ | |
# |OnTick +------>|enter |
# +--------------+ +^-----exit-+
# |
# +--------------+ |
# |OnTickDisabled+--------+
# +--------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, _, gate, on_impulse_event), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("OnTickDisabled", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Gate.inputs:enter"),
("OnTickDisabled.outputs:tick", "Gate.inputs:enter"),
("OnImpulseEvent.outputs:execOut", "Gate.inputs:toggle"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", True),
("OnTick.inputs:onlyPlayback", False),
("OnImpulseEvent.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has not triggered
self.assertFalse(gate_exit.get())
await controller.evaluate(graph)
self.assertFalse(gate_exit.get())
        # toggle the gate and verify that the tick goes through. On the first evaluation it isn't known whether the
        # Gate will trigger, because the order in which entry points are executed is not defined... FIXME
controller.attribute("state:enableImpulse", on_impulse_event).set(True)
await controller.evaluate(graph)
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_fan_out_exec(self):
"""Test that execution fan-out is handled correctly."""
# We want to reset the execution attribute states before the node compute() to avoid bugs
# that arise when authors forget to fully specify the output states. However we can't
# do this in the middle of traversal, because fan-out from a connection requires that the state
# be preserved for every downstream node which may read from it (like Gate).
controller = og.Controller()
keys = og.Controller.Keys
(graph, (_, gate, _), _, _,) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("Gate", "omni.graph.action.Gate"),
("Counter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "Counter.inputs:execIn"),
("OnTick.outputs:tick", "Gate.inputs:enter"),
],
keys.SET_VALUES: [
("Gate.inputs:startClosed", False),
("OnTick.inputs:onlyPlayback", False),
],
},
)
await controller.evaluate(graph)
gate_exit = controller.attribute("outputs:exit", gate)
# Verify the Gate has triggered
self.assertTrue(gate_exit.get())
await controller.evaluate(graph)
self.assertTrue(gate_exit.get())
# ----------------------------------------------------------------------
async def test_onclosing(self):
"""Test OnClosing node"""
        # Testing OnClosing is tricky because OG is being destroyed when it fires -
        # so we test by sending a custom event when the network is triggered
        # and then checking that we received that event
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
n = "omni.graph.action." + event_name
return carb.events.type_from_string(n)
got_event = [False]
def on_event(_):
got_event[0] = True
reg_event_name = registered_event_name("foo")
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
sub = message_bus.create_subscription_to_push_by_type(reg_event_name, on_event)
self.assertIsNotNone(sub)
controller = og.Controller()
keys = og.Controller.Keys
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnClosing", "omni.graph.action.OnClosing"),
("Send", "omni.graph.action.SendCustomEvent"),
],
keys.CONNECT: [("OnClosing.outputs:execOut", "Send.inputs:execIn")],
keys.SET_VALUES: [("Send.inputs:eventName", "foo"), ("Send.inputs:path", "Test Path")],
},
)
# evaluate once so that graph is in steady state
await controller.evaluate()
# close the stage
usd_context = omni.usd.get_context()
(result, _) = await usd_context.close_stage_async()
self.assertTrue(result)
# Check our handler was called
self.assertTrue(got_event[0])
async def test_onloaded(self):
"""Test OnLoaded node"""
def registered_event_name(event_name):
"""Returns the internal name used for the given custom event name"""
n = "omni.graph.action." + event_name
return carb.events.type_from_string(n)
events = []
def on_event(e):
events.append(e.payload["!path"])
reg_event_name = registered_event_name("foo")
message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
sub = message_bus.create_subscription_to_push_by_type(reg_event_name, on_event)
self.assertIsNotNone(sub)
controller = og.Controller()
keys = og.Controller.Keys
controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("OnLoaded", "omni.graph.action.OnLoaded"),
("Send1", "omni.graph.action.SendCustomEvent"),
("Send2", "omni.graph.action.SendCustomEvent"),
],
keys.CONNECT: [
("OnLoaded.outputs:execOut", "Send1.inputs:execIn"),
("OnTick.outputs:tick", "Send2.inputs:execIn"),
],
keys.SET_VALUES: [
("OnTick.inputs:onlyPlayback", False),
("Send1.inputs:eventName", "foo"),
("Send2.inputs:eventName", "foo"),
("Send1.inputs:path", "Loaded"),
("Send2.inputs:path", "Tick"),
],
},
)
# evaluate once so that graph is in steady state
await controller.evaluate()
# Verify Loaded came before OnTick
self.assertListEqual(events, ["Loaded", "Tick"])
# ----------------------------------------------------------------------
async def test_active_latent(self):
"""exercise a latent node that executes downstream nodes while latent"""
# +--------+ +----------+finished+-------------+
# | OnTick+-->| TickN +-------->FinishCounter|
# +--------+ | | +-------------+
# | +-+
# +----------+ | +------------+ +------------+ +------------+
# +-----> TickCounter+----->TickCounter2+---->TickCounter3|
# tick +------------+ +------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("TickN", "omni.graph.action.TickN"),
("FinishCounter", "omni.graph.action.Counter"),
("TickCounter", "omni.graph.action.Counter"),
("TickCounter2", "omni.graph.action.Counter"),
("TickCounter3", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "TickN.inputs:execIn"),
("TickN.outputs:finished", "FinishCounter.inputs:execIn"),
("TickN.outputs:tick", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickCounter2.inputs:execIn"),
("TickCounter2.outputs:execOut", "TickCounter3.inputs:execIn"),
],
keys.SET_VALUES: [("TickN.inputs:duration", 3), ("OnTick.inputs:onlyPlayback", False)],
},
)
(_, _, finish_counter, tick_counter, _, tick_counter_3) = nodes
finish_counter_controller = og.Controller(og.Controller.attribute("outputs:count", finish_counter))
tick_counter_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter))
tick_counter_3_controller = og.Controller(og.Controller.attribute("outputs:count", tick_counter_3))
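        # TickN with duration 3: one activation frame, then three tick frames that
        # drive the TickCounter chain, then a final frame that fires finished.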
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
await controller.evaluate(graph)
self.assertEqual(tick_counter_controller.get(), 1)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_controller.get(), 2)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 0)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
await controller.evaluate(graph)
self.assertEqual(finish_counter_controller.get(), 1)
self.assertEqual(tick_counter_3_controller.get(), 3)
# ----------------------------------------------------------------------
async def test_latent_chain(self):
"""exercise a chain of latent nodes"""
# +---------+ +-------+ tick +-------+ tick +-------+
# |OnImpulse+-->TickA +-------->TickB +-------->|LatentC|
# +---------+ +-----+-+ +------++ +-------+-----+
# | finish | finish |
# finish | +-------------+ | +-------------+ +-v----------+
# +->TickCounterA | +-->| TickCounterB| |TickCounterC|
# +-------------+ +-------------+ +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("LatentC", "omni.graph.action.TickN"),
("TickCounterA", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
("TickCounterC", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "TickB.inputs:execIn"),
("TickA.outputs:finished", "TickCounterA.inputs:execIn"),
("TickB.outputs:tick", "LatentC.inputs:execIn"),
("TickB.outputs:finished", "TickCounterB.inputs:execIn"),
("LatentC.outputs:finished", "TickCounterC.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("LatentC.inputs:duration", 2),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, _, _, _, tick_counter_a, tick_counter_b, tick_counter_c) = nodes
for _ in range(16):
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 2)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_c)), 4)
# ----------------------------------------------------------------------
async def test_latent_and_push(self):
"""exercise latent nodes in combination with stateful loop node"""
#
# +---------+ +-------+ tick +--------+ loopBody +-------+ +------------+
# |OnImpulse+-->|TickA +----------->ForLoop1++--------->|TickB +-+->|TickCounterB|
# +---------+ +----+--+ +--------++ +-------+ | +------------+
# | finish | |
# | | |
# | +--------------+ +v----------------+ +-v------------+
# +----->|FinishCounterA| |FinishLoopCounter| |FinishCounterB|
# +--------------+ +-----------------+ +--------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("ForLoop1", "omni.graph.action.ForLoop"),
("TickB", "omni.graph.action.TickN"),
("FinishLoopCounter", "omni.graph.action.Counter"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("TickA.outputs:tick", "ForLoop1.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("ForLoop1.outputs:loopBody", "TickB.inputs:execIn"),
("ForLoop1.outputs:finished", "FinishLoopCounter.inputs:execIn"),
("TickB.outputs:tick", "TickCounterB.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
],
keys.SET_VALUES: [
("ForLoop1.inputs:start", 0),
("ForLoop1.inputs:stop", 3),
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("OnImpulse.state:enableImpulse", True),
("OnImpulse.inputs:onlyPlayback", False),
],
},
)
(_, _, _, _, finish_loop_counter, finish_counter_a, finish_counter_b, tick_counter_b) = nodes
for _ in range(20):
await controller.evaluate(graph)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_a)), 1)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", tick_counter_b)), 12)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_counter_b)), 6)
self.assertEqual(og.Controller.get(controller.attribute("outputs:count", finish_loop_counter)), 2)
# ----------------------------------------------------------------------
async def test_latent_fan_out(self):
"""Test latent nodes when part of parallel evaluation"""
# +------------+
# +---->|TickCounterA|
# | +------------+
# |
# +--------++ +----------+
# +-> TickA +--->|FinishedA |
# | +---------+ +----------+
# +---------+ +-----------+ |
# |OnImpulse+-->|TickCounter+-+
# +---------+ +-----------+ |
# | +---------+ +----------+
# +>| TickB +--->|FinishedB |
# +--------++ +----------+
# |
# | +------------+
# +---->|TickCounterB|
# +------------+
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("TickCounter", "omni.graph.action.Counter"),
("TickCounterA", "omni.graph.action.Counter"),
("TickCounterB", "omni.graph.action.Counter"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickA.inputs:execIn"),
("TickCounter.outputs:execOut", "TickB.inputs:execIn"),
("TickA.outputs:tick", "TickCounterA.inputs:execIn"),
("TickB.outputs:tick", "TickCounterB.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 2),
("TickB.inputs:duration", 2),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, _, _, tick_counter, tick_counter_a, tick_counter_b, finish_counter_a, finish_counter_b) = nodes
def check_counts(c, a, b, f_a, f_b):
for node, expected in (
(tick_counter, c),
(tick_counter_a, a),
(tick_counter_b, b),
(finish_counter_a, f_a),
(finish_counter_b, f_b),
):
count = og.Controller.get(controller.attribute("outputs:count", node))
self.assertEqual(count, expected, node.get_prim_path())
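        # Frame-by-frame: the first evaluation activates both TickN nodes (TickCounter
        # fires once); they tick on the next two evaluations and finish on the fourth.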
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 0)
await controller.evaluate(graph)
check_counts(1, 1, 1, 0, 0)
await controller.evaluate(graph)
check_counts(1, 2, 2, 0, 0)
await controller.evaluate(graph)
check_counts(1, 2, 2, 1, 1)
# ----------------------------------------------------------------------
async def test_diamond_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a downstream node"""
# +--------++ +----------+
# +--> TickA +--->|FinishedA |---+
# | +---------+ +----------+ |
# +---------+ +-----------+ | | +------------+
# |OnImpulse+-->|TickCounter+-+ +-->|MergeCounter|
# +---------+ +-----------+ | | +------------+
# | +---------+ +----------+ |
# +-->| TickB +--->|FinishedB |--+
# +--------++ +----------+
# | +---------+
# +-->| TickC |
# +--------++
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickCounter", "omni.graph.action.Counter"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("TickC", "omni.graph.action.TickN"),
("FinishCounterA", "omni.graph.action.Counter"),
("FinishCounterB", "omni.graph.action.Counter"),
("MergeCounter", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickCounter.inputs:execIn"),
("TickCounter.outputs:execOut", "TickA.inputs:execIn"),
("TickCounter.outputs:execOut", "TickB.inputs:execIn"),
("TickCounter.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "FinishCounterA.inputs:execIn"),
("TickB.outputs:finished", "FinishCounterB.inputs:execIn"),
("FinishCounterA.outputs:execOut", "MergeCounter.inputs:execIn"),
("FinishCounterB.outputs:execOut", "MergeCounter.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_counter, _, _, tick_c, finish_counter_a, finish_counter_b, merge_counter) = nodes
def check_counts(tc, f_a, f_b, mc, tick_c_count):
for node, expected in (
(tick_counter, tc),
(finish_counter_a, f_a),
(finish_counter_b, f_b),
(merge_counter, mc),
):
count = og.Controller.get(controller.attribute("outputs:count", node))
self.assertEqual(count, expected, node.get_prim_path())
self.assertEqual(tick_c.get_compute_count(), tick_c_count)
self.assertEqual(tick_c.get_compute_count(), 0)
# set up latent tickers
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 1)
# latent ticks
await controller.evaluate(graph)
check_counts(1, 0, 0, 0, 2)
# both branches complete
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# no count changes + no additional computes of tickC
await controller.evaluate(graph)
check_counts(1, 1, 1, 2, 3)
# ----------------------------------------------------------------------
async def test_diamond_latent_fan(self):
"""Test latent nodes in parallel fan-out which fan-in to a latent downstream node"""
# +--------++
# +--> TickA +--+
# | +---------+ |
# +---------+ | | +-------+ +-------+
# |OnImpulse+-->+ +-->|TickD +-+--->|CountF |
# +---------+ | | +-------+ | +-------+
# | +--------+ | +--->+-------+
# +-->| TickB +--+ |TickE |
# | +--------+ +--->+-------+
# | +--------+ |
# +-->| TickC +----------------+
# +--------+
        # Note that when TickA triggers TickD into its latent state, TickB hits TickD subsequently. That
        # subsequent evaluation is _transient_, meaning that TickB will not block on a new copy of TickD:
        # there is only one TickD, so there can be only one state (latent or not).
controller = og.Controller()
keys = og.Controller.Keys
(graph, nodes, _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("OnImpulse", "omni.graph.action.OnImpulseEvent"),
("TickA", "omni.graph.action.TickN"),
("TickB", "omni.graph.action.TickN"),
("TickC", "omni.graph.action.TickN"),
("TickD", "omni.graph.action.TickN"),
("TickE", "omni.graph.action.TickN"),
("CountF", "omni.graph.action.Counter"),
],
keys.CONNECT: [
("OnImpulse.outputs:execOut", "TickA.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickB.inputs:execIn"),
("OnImpulse.outputs:execOut", "TickC.inputs:execIn"),
("TickA.outputs:finished", "TickD.inputs:execIn"),
("TickB.outputs:finished", "TickD.inputs:execIn"),
("TickC.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "TickE.inputs:execIn"),
("TickD.outputs:finished", "CountF.inputs:execIn"),
],
keys.SET_VALUES: [
("TickA.inputs:duration", 1),
("TickB.inputs:duration", 1),
("TickC.inputs:duration", 2),
("TickD.inputs:duration", 1),
("TickE.inputs:duration", 1),
("OnImpulse.inputs:onlyPlayback", False),
("OnImpulse.state:enableImpulse", True),
],
},
)
(_, tick_a, tick_b, tick_c, tick_d, tick_e, count_f) = nodes
def check_counts(i, ta, tb, tc, td, te):
# print(f"{[node.get_compute_count() for node, expected in ((tick_a, ta), (tick_b, tb), (tick_c, tc), (tick_d, td), (tick_e, te))]}")
for node, expected in ((tick_a, ta), (tick_b, tb), (tick_c, tc), (tick_d, td), (tick_e, te)):
self.assertEqual(node.get_compute_count(), expected, f"Check {i} for {node.get_prim_path()}")
# A, B, C, D, E
compute_counts = [
(1, 1, 1, 0, 0), # 0. fan out to trigger A, B, C into latent state
(2, 2, 2, 0, 0), # 1. A, B, C tick
(3, 3, 3, 2, 0), # 2. A, B end latent, D into latent via A or B, D ticks via A or B, C ticks
(3, 3, 4, 3, 2), # 3.
(3, 3, 4, 3, 3), # 4.
(3, 3, 4, 3, 3), # 5.
(3, 3, 4, 3, 3), # 6.
]
for i, cc in enumerate(compute_counts):
await controller.evaluate(graph)
check_counts(i, *cc)
# Verify that CountF has computed 1x due to the fan-in at TickD NOT acting like separate threads
self.assertEqual(count_f.get_compute_count(), 1)
async def test_om_63924(self):
"""Test OM-63924 bug is fixed"""
# The problem here was that if there was fan in to a node which was
# computed once and then totally unwound before the other history was
# processed, there would never be a deferred activation and so the 2nd
# compute would never happen. Instead we want to only unwind one history
# at a time to ensure each one is fully evaluated.
i = 2
class OnForEachEventPy:
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
nonlocal i
go = node.get_attribute("inputs:go")
go_val = og.Controller.get(go)
if not go_val:
return True
if i > 0:
og.Controller.set(
node.get_attribute("outputs:execOut"), og.ExecutionAttributeState.ENABLED_AND_PUSH
)
og.Controller.set(node.get_attribute("outputs:syncValue"), i)
i -= 1
return True
@staticmethod
def get_node_type() -> str:
return "omni.graph.test.OnForEachEventPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
node_type.add_input(
"inputs:go",
"bool",
False,
)
node_type.add_output("outputs:execOut", "execution", True)
node_type.add_output("outputs:syncValue", "uint64", True)
return True
og.register_node_type(OnForEachEventPy, 1)
class NoOpPy:
@staticmethod
def compute(context: og.GraphContext, node: og.Node):
og.Controller.set(node.get_attribute("outputs:execOut"), og.ExecutionAttributeState.ENABLED)
return True
@staticmethod
def get_node_type() -> str:
return "omni.graph.test.NoOpPy"
@staticmethod
def initialize_type(node_type: og.NodeType):
node_type.add_input(
"inputs:execIn",
"execution",
True,
)
node_type.add_output("outputs:execOut", "execution", True)
return True
og.register_node_type(NoOpPy, 1)
controller = og.Controller()
keys = og.Controller.Keys
(graph, (for_each, _, _, _, _, no_op_2), _, _) = controller.edit(
self.TEST_GRAPH_PATH,
{
keys.CREATE_NODES: [
("PostProcessDispatcher", "omni.graph.test.OnForEachEventPy"),
("TSA1", "omni.graph.action.SyncGate"),
("TSA0", "omni.graph.action.SyncGate"),
("TestSyncAccum", "omni.graph.action.SyncGate"),
("TestPrimBbox", "omni.graph.test.NoOpPy"),
("NoOpPy2", "omni.graph.test.NoOpPy"),
],
keys.CONNECT: [
("PostProcessDispatcher.outputs:execOut", "TSA0.inputs:execIn"),
("PostProcessDispatcher.outputs:execOut", "TSA1.inputs:execIn"),
("TSA1.outputs:execOut", "TestSyncAccum.inputs:execIn"),
("TSA0.outputs:execOut", "TestPrimBbox.inputs:execIn"),
("TestPrimBbox.outputs:execOut", "TestSyncAccum.inputs:execIn"),
("TestSyncAccum.outputs:execOut", "NoOpPy2.inputs:execIn"),
("PostProcessDispatcher.outputs:syncValue", "TSA1.inputs:syncValue"),
("PostProcessDispatcher.outputs:syncValue", "TSA0.inputs:syncValue"),
("PostProcessDispatcher.outputs:syncValue", "TestSyncAccum.inputs:syncValue"),
],
},
)
og.Controller.set(controller.attribute("inputs:go", for_each), True)
await controller.evaluate(graph)
# Verify the final sync gate triggered due to being computed 2x
exec_out = og.Controller.get(controller.attribute("outputs:execOut", no_op_2))
self.assertEqual(exec_out, og.ExecutionAttributeState.ENABLED)
omniverse-code/kit/exts/omni.graph.action/docs/ui_nodes.md

(ogn_ui_nodes)=
# UI Nodes
You may have seen the `omni.ui` extension that gives you the ability to create user interface elements through Python scripting. OmniGraph provides some nodes that can be used to do the same thing through an action graph.
These nodes provide an interface to the equivalent `omni.ui` script elements. The attributes of the nodes match the parameters you would pass to the script.
| Node | omni.ui Equivalent |
| --------------------------------------------------------------------------------------------------- | -------------------------------------------------------------- |
| {ref}`Button<GENERATED - Documentation _ognomni.graph.ui.Button>` | {py:class}`omni.ui.Button` |
| {ref}`ComboBox<GENERATED - Documentation _ognomni.graph.ui.ComboBox>` | {py:class}`omni.ui.ComboBox` |
| {ref}`OnWidgetClicked<GENERATED - Documentation _ognomni.graph.ui.OnWidgetClicked>` | {py:class}`omni.ui.Widget.call_mouse_pressed_fn` |
| {ref}`OnWidgetValueChanged<GENERATED - Documentation _ognomni.graph.ui.OnWidgetValueChanged>` | {py:class}`omni.ui.Widget.add_value_changed_fn` |
| {ref}`Slider<GENERATED - Documentation _ognomni.graph.ui.Slider>` | {py:class}`omni.ui.IntSlider` {py:class}`omni.ui.FloatSlider` |
| {ref}`VStack<GENERATED - Documentation _ognomni.graph.ui.VStack>` | {py:class}`omni.ui.VStack` |
## How To Use Them
The UI nodes create widgets that are meant to exist only temporarily, so you should ensure they are part of a graph that creates them only once and then destroys them once their utility has ended. A scripted sketch of such a setup follows.
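Below is a minimal sketch of building such a graph from Python with `og.Controller`. The node type names and the `inputs:parentWidgetPath` requirement follow the node reference above; the exact `inputs:create` and `inputs:text` spellings and the widget path value are assumptions, so verify them against the generated node documentation.

```python
import omni.graph.core as og

keys = og.Controller.Keys
og.Controller.edit(
    # "execution" requests the Action Graph evaluator (assumed option name).
    {"graph_path": "/World/UIGraph", "evaluator_name": "execution"},
    {
        keys.CREATE_NODES: [
            ("OnImpulse", "omni.graph.action.OnImpulseEvent"),
            ("Button", "omni.graph.ui.Button"),
        ],
        keys.CONNECT: [
            # The Button node creates its widget when its create input fires.
            ("OnImpulse.outputs:execOut", "Button.inputs:create"),
        ],
        keys.SET_VALUES: [
            ("OnImpulse.inputs:onlyPlayback", False),
            # parentWidgetPath must name an existing widget; this omni.ui_query
            # style path is only an example value.
            ("Button.inputs:parentWidgetPath", "Viewport//Frame/ZStack[0]"),
            ("Button.inputs:text", "Click Me"),  # attribute name assumed
        ],
    },
)
```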
omniverse-code/kit/exts/omni.graph.action/docs/CHANGELOG.md

# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.31.1] - 2023-02-10
### Fixed
- Add tests for AG fan-out with shared dependencies
## [1.31.0] - 2022-09-01
### Added
- SwitchToken, OnLoaded
## [1.30.2] - 2022-08-30
### Fixed
- Linting errors
## [1.30.1] - 2022-08-09
### Fixed
- Applied formatting to all of the Python files
## [1.30.0] - 2022-07-29
### Changed
- Add prim input to OnObjectChanged node
## [1.29.0] - 2022-07-29
### Added
- Support for relative image urls in UI widget nodes
## [1.28.1] - 2022-07-28
### Fixed
- Spurious error messages about 'Node compute request is ignored because XXX is not request-driven'
## [1.28.0] - 2022-07-26
### Added
- Placer node.
- Added ReadWidgetProperty node
## [1.27.0] - 2022-07-26
### Changed
- Removed internal Placers from the widget nodes.
## [1.26.1] - 2022-07-25
### Fixed
- Various UI nodes were rejecting valid exec inputs like ENABLED_AND_PUSH
## [1.26.0] - 2022-07-22
### Added
- 'style' input attributes for Button, Spacer and Stack nodes.
### Fixed
- WriteWidgetStyle was failing on styles containing hex values (e.g. for colors)
## [1.25.0] - 2022-07-20
### Added
- WriteWidgetStyle node
## [1.24.1] - 2022-07-21
### Changed
- Undo revert
## [1.24.0] - 2022-07-18
### Added
- Spacer node
### Changed
- VStack node:
- Removed all execution inputs except for create.
- Added support for OgnWriteWidgetProperty.
- inputs:parentWidgetPath is no longer optional.
## [1.23.0] - 2022-07-18
### Changed
- Reverted changes in 1.21.1
## [1.22.0] - 2022-07-15
### Added
- WriteWidgetProperty node
### Changed
- Removed all of Button node's execution inputs except for Create
- Removed Button node's 'iconSize' input.
- Modified Button node to work with WriteWidgetProperty
## [1.21.1] - 2022-07-15
### Changed
- Added test node TickN, modified tests
## [1.21.0] - 2022-07-14
### Changed
- Added +/- icons to Multigate and Sequence
## [1.20.0] - 2022-07-07
### Added
- Test for public API consistency
## [1.19.0] - 2022-07-04
### Changed
- OgnButton requires a parentWidgetPath. It no longer defaults to the current viewport.
- Each OgnButton instance can create multiples buttons. They no longer destroy the previous ones.
- widgetIdentifiers are now unique within GraphContext
## [1.18.1] - 2022-06-20
### Changed
- Optimized MultiSequence
## [1.18.0] - 2022-05-30
### Added
- OnClosing
## [1.17.1] - 2022-05-23
### Changed
- Changed VStack ui name to Stack
## [1.17.0] - 2022-05-24
### Changed
- Converted ForEach node from Python to C++
- Removed OnWidgetDoubleClicked
## [1.16.1] - 2022-05-23
### Changed
- Converted Counter node from Python to C++
## [1.16.0] - 2022-05-21
### Added
- Removed OnGraphInitialize; added Once to replace it
## [1.15.1] - 2022-05-20
### Changed
- Added direction input to VStack node to allow objects to stack in width, height & depth directions.
- Button node uses styles to select icons rather than mouse functions.
## [1.15.0] - 2022-05-19
### Added
- OnGraphInitialize - triggers when the graph is created
### Changed
- OnStageEvent - removed non-functional events
## [1.14.1] - 2022-05-19
### Fixed
- Fixed OgnOnWidgetValueChanged output type resolution
## [1.14.0] - 2022-05-17
### Changed
- Added '(BETA)' to the ui_names of the new UI nodes.
## [1.13.0] - 2022-05-16
### Added
- Added Sequence node with dynamic outputs named OgnMultisequence
## [1.12.0] - 2022-05-11
### Added
- Node definitions for UI creation and manipulation
- Documentation on how to use the new UI nodes
- Dependencies on extensions omni.ui_query and omni.kit.window.filepicker(optional)
## [1.11.3] - 2022-04-12
### Fixed
- OnCustomEvent when onlyPlayback=true
- Remove state attrib from PrintText
## [1.11.2] - 2022-04-08
### Added
- Added absoluteSimTime output attribute to the OnTick node
## [1.11.1] - 2022-03-16
### Fixed
- OnStageEvent Animation Stop event when only-on-playback is true
## [1.11.0] - 2022-03-10
### Added
- Removed _outputs::shiftOut_, _outputs::ctrlOut_, _outputs::altOut_ from _OnKeyboardInput_ node.
- Added _inputs::shiftIn_, _inputs::ctrlIn_, _inputs::altIn_ to _OnKeyboardInput_ node.
- Added support for key modifiers to _OnKeyboardInput_ node.
## [1.10.1] - 2022-03-09
### Changed
- Made all input attributes of all event source nodes literalOnly
## [1.10.0] - 2022-02-24
### Added
- added SyncGate node
## [1.9.1] - 2022-02-14
### Fixed
- add additional extension enabled check for omni.graph.ui not enabled error
## [1.9.0] - 2022-02-04
### Added
- added SetPrimRelationship node
## [1.8.0] - 2022-02-04
### Changed
- Several event nodes now have _inputs:onlyPlayback_ attributes to control when they are active. The default is enabled, which means these nodes will only operate when playback is active.
### Fixed
- Category for Counter
## [1.7.0] - 2022-01-27
### Added
- Added SetPrimActive node
## [1.6.0] - 2022-01-27
### Added
- Added OnMouseInput node
### Changed
- Changed OnGamepadInput to use SubscriptionToInputEvents instead
- Changed OnKeyboardInput to use SubscriptionToInputEvents instead
## [1.5.0] - 2022-01-25
### Added
- Added OnGamepadInput node
## [1.4.5] - 2022-01-24
### Fixed
- categories for several nodes
## [1.4.4] - 2022-01-14
### Added
- Added car customizer tutorial
## [1.4.2] - 2022-01-05
### Changed
- Categories added to all nodes
## [1.4.1] - 2021-12-20
### Changed
- _GetLookAtRotation_ moved to _omni.graph.nodes_
## [1.4.0] - 2021-12-10
### Changed
- _OnStageEvent_ handles new stage events
## [1.3.0] - 2021-11-22
### Changed
- _OnKeyboardInput_ to use allowedTokens for input key
- _OnStageEvent_ bugfix to avoid spurious error messages on shutdown
## [1.2.0] - 2021-11-10
### Changed
- _OnKeyboardInput_, _OnCustomEvent_ to use _INode::requestCompute()_
## [1.1.0] - 2021-11-04
### Changed
- _OnImpulseEvent_ to use _INode::requestCompute()_
## [1.0.0] - 2021-05-10
### Initial Version
omniverse-code/kit/exts/omni.graph.action/docs/README.md

# OmniGraph Action Graphs
## Introduction to Action Graphs
Provides visual-programming graphs to help designers bring their Omniverse creations to life. Action Graphs are
triggered by events and can execute nodes which modify the stage.
## The Action Graph Extension
Contains nodes which work only with Action Graphs. Compute Nodes from other extensions can be used with Action Graphs;
for example, omni.graph.nodes.
omniverse-code/kit/exts/omni.graph.action/docs/index.rst

.. _ogn_omni_graph_action:
OmniGraph Action Graph
######################
.. tabularcolumns:: |L|R|

.. csv-table::
    :width: 100%

    **Extension**: omni.graph.action,**Documentation Generated**: |today|
.. toctree::
    :maxdepth: 1

    CHANGELOG
This extension is a collection of functionality required for OmniGraph Action Graphs.
.. toctree::
    :maxdepth: 2
    :caption: Contents

    Overview of Action Graphs<Overview>
    Hands-on Introduction to Action Graphs<https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_omnigraph/quickstart.html>
    Action Graph Car Customizer Tutorial<https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_omnigraph/car_customizer.html>
    Building UI With Action Graph<ui_nodes>
For more comprehensive examples targeted at explaining the use of OmniGraph features in detail see
:ref:`ogn_user_guide`
.. note::
Action Graphs are in an early development state
omniverse-code/kit/exts/omni.graph.action/docs/Overview.md

(ogn_omni_graph_action_overview)=
```{csv-table}
**Extension**: omni.graph.action,**Documentation Generated**: {sub-ref}`today`
```
This extension is a collection of functionality required for OmniGraph Action Graphs.
```{note}
Action Graphs are in an early development state
```
# Action Graph Overview
For a hands-on introduction to OmniGraph Action Graphs see
[Action Graph Quickstart](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_omnigraph/quickstart.html)
For more comprehensive and thorough documentation on various OmniGraph features see {ref}`ogn_user_guide`
Action Graphs are composed of any number of separate chains of nodes, like deformer graphs. However, there are important differences which make Action Graphs better suited to particular applications.
## Event Sources
Action graphs are *event driven*, which means that each chain of nodes must start with an *Event Source* node. Each event source node can be thought of as an entry point of the graph.
*Event Source* nodes are named with an *On* prefix, never have an *execution* input attribute, and always have at least one output *execution* attribute.
| Event Source Nodes |
| ------------------------------------------------------------------------------------------ |
| {ref}`On Keyboard Input <GENERATED - Documentation _ognomni.graph.action.OnKeyboardInput>` |
| {ref}`On Tick <GENERATED - Documentation _ognomni.graph.action.OnTick>` |
| {ref}`On Playback Tick <GENERATED - Documentation _ognomni.graph.action.OnPlaybackTick>` |
| {ref}`On Impulse Event <GENERATED - Documentation _ognomni.graph.action.OnImpulseEvent>` |
| {ref}`On Object Change <GENERATED - Documentation _ognomni.graph.action.OnObjectChange>` |
| {ref}`On Custom Event <GENERATED - Documentation _ognomni.graph.action.OnCustomEvent>` |
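As a concrete illustration, the smallest possible Action Graph is one event source wired to one downstream node. The sketch below uses `og.Controller` with node and attribute names taken from this extension (`OnImpulseEvent`, `Counter`); the `evaluator_name` option used to request the Action Graph evaluator is an assumption to verify against the OmniGraph scripting documentation.

```python
import omni.graph.core as og

keys = og.Controller.Keys
(graph, nodes, _, _) = og.Controller.edit(
    # "execution" requests the Action Graph evaluator (assumed option name).
    {"graph_path": "/World/ActionGraph", "evaluator_name": "execution"},
    {
        keys.CREATE_NODES: [
            # The event source: no execution input, one execution output.
            ("OnImpulse", "omni.graph.action.OnImpulseEvent"),
            ("Count", "omni.graph.action.Counter"),
        ],
        keys.CONNECT: [
            ("OnImpulse.outputs:execOut", "Count.inputs:execIn"),
        ],
        keys.SET_VALUES: [
            # Fire even when the timeline is not playing.
            ("OnImpulse.inputs:onlyPlayback", False),
            ("OnImpulse.state:enableImpulse", True),
        ],
    },
)
```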
## Execution Attributes
Action graphs make use of *execution*-type attributes.
The *execution* evaluator works by following *execution* connections downstream and computing the nodes it encounters until there are no more downstream connections to follow. The entire chain is executed to completion. When there is no downstream node, the execution terminates and the next node is popped off the *execution stack*.
Note that if there is more than one downstream connection from an *execution* attribute, each path will be followed in an undetermined order. Multiple downstream chains can be executed in a fixed order either by chaining the end of one to the start of the other, or by using the {ref}`Sequence <GENERATED - Documentation _ognomni.graph.action.Sequence>` node.
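For example, the two wirings in the sketch below differ only in their ordering guarantees: fanning `execOut` directly into two chains leaves their relative order undetermined, while routing through a Sequence node runs one output chain to completion before the next. The Sequence output names `a` and `b` are assumptions to check against the node reference.

```python
import omni.graph.core as og

keys = og.Controller.Keys
og.Controller.edit(
    {"graph_path": "/World/SequencedGraph", "evaluator_name": "execution"},
    {
        keys.CREATE_NODES: [
            ("OnImpulse", "omni.graph.action.OnImpulseEvent"),
            ("Sequence", "omni.graph.action.Sequence"),
            ("CountA", "omni.graph.action.Counter"),
            ("CountB", "omni.graph.action.Counter"),
        ],
        keys.CONNECT: [
            # Undetermined order would be two direct connections:
            #   ("OnImpulse.outputs:execOut", "CountA.inputs:execIn"),
            #   ("OnImpulse.outputs:execOut", "CountB.inputs:execIn"),
            # Fixed order via Sequence: 'a' completes before 'b' starts.
            ("OnImpulse.outputs:execOut", "Sequence.inputs:execIn"),
            ("Sequence.outputs:a", "CountA.inputs:execIn"),
            ("Sequence.outputs:b", "CountB.inputs:execIn"),
        ],
    },
)
```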
The value of an *execution* attribute tells the evaluator what the next step should be in the chain. It can be one of:
| Value            | Description                                                                                   |
| ---------------- | ------------------------------------------------------------------------------------------- |
| DISABLED | Do not continue from this attribute. |
| ENABLED | Continue downstream from this attribute. |
| ENABLED_AND_PUSH | Save the current node on the *execution stack* and continue downstream from this attribute. |
| LATENT_PUSH | Save the current node as it performs some asynchronous operation |
| LATENT_FINISH | Finish the asynchronous operation and continue downstream from this attribute. |
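In a Python node these values are written to the node's execution output attribute to steer the evaluator. A minimal sketch, assuming the node was registered with an `outputs:execOut` execution attribute; `ENABLED` and `ENABLED_AND_PUSH` are members of `og.ExecutionAttributeState`, and the latent states are assumed to follow the same naming.

```python
import omni.graph.core as og

class StepThenResumeNode:
    """Illustrative compute() only; registration boilerplate omitted."""

    @staticmethod
    def compute(context: og.GraphContext, node: og.Node) -> bool:
        # ENABLED: continue downstream from outputs:execOut.
        og.Controller.set(
            node.get_attribute("outputs:execOut"),
            og.ExecutionAttributeState.ENABLED,
        )
        # To be revisited after the downstream chain unwinds, a node would
        # instead write ENABLED_AND_PUSH, saving itself on the execution stack:
        # og.Controller.set(
        #     node.get_attribute("outputs:execOut"),
        #     og.ExecutionAttributeState.ENABLED_AND_PUSH,
        # )
        return True
```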
# Flow Control
Many Action Graphs will need to do different things depending on some state. In a Python script you would use an *if* statement or a *while* loop to accomplish this. Similarly, in an Action Graph there are nodes which provide this branching functionality. Flow control nodes have more than one *execution* output attribute, which is used to branch the evaluation flow.
| Flow Control Nodes |
| --------------------------------------------------------------------------- |
| {ref}`Branch <GENERATED - Documentation _ognomni.graph.action.Branch>` |
| {ref}`ForEach <GENERATED - Documentation _ognomni.graph.action.ForEach>` |
| {ref}`For Loop <GENERATED - Documentation _ognomni.graph.action.ForLoop>` |
| {ref}`Flip Flop <GENERATED - Documentation _ognomni.graph.action.FlipFlop>` |
| {ref}`Gate <GENERATED - Documentation _ognomni.graph.action.Gate>` |
| {ref}`Sequence <GENERATED - Documentation _ognomni.graph.action.Sequence>` |
| {ref}`Delay <GENERATED - Documentation _ognomni.graph.action.Delay>` |
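To make the scripting parallel concrete, the sketch below wires a Branch node as the graph equivalent of an *if*. The `inputs:condition`, `outputs:execTrue`/`outputs:execFalse` and `OnTick.outputs:tick` attribute names are assumptions based on the usual naming pattern of these nodes; verify them in the node reference above.

```python
import omni.graph.core as og

keys = og.Controller.Keys
og.Controller.edit(
    {"graph_path": "/World/BranchGraph", "evaluator_name": "execution"},
    {
        keys.CREATE_NODES: [
            ("OnTick", "omni.graph.action.OnTick"),
            ("Branch", "omni.graph.action.Branch"),
            ("CountTrue", "omni.graph.action.Counter"),
            ("CountFalse", "omni.graph.action.Counter"),
        ],
        keys.CONNECT: [
            ("OnTick.outputs:tick", "Branch.inputs:execIn"),
            # Only one of these outputs fires per evaluation.
            ("Branch.outputs:execTrue", "CountTrue.inputs:execIn"),
            ("Branch.outputs:execFalse", "CountFalse.inputs:execIn"),
        ],
        keys.SET_VALUES: [
            ("OnTick.inputs:onlyPlayback", False),
            ("Branch.inputs:condition", True),
        ],
    },
)
```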
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop_test/__init__.py

from ._renderer_cuda_interop_test import *
# Cached interface instance pointer
def get_renderer_cuda_interop_test_interface() -> IRendererCudaInteropTest:
if not hasattr(get_renderer_cuda_interop_test_interface, "renderer_cuda_interop_test"):
get_renderer_cuda_interop_test_interface.renderer_cuda_interop_test = acquire_renderer_cuda_interop_test_interface()
return get_renderer_cuda_interop_test_interface.renderer_cuda_interop_test
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/__init__.py

from ._renderer_cuda_interop import *
# Cached interface instance pointer
def get_renderer_cuda_interop_interface() -> IRendererCudaInterop:
"""Returns cached :class:`omni.kit.renderer.IRendererCudaInterop` interface"""
if not hasattr(get_renderer_cuda_interop_interface, "renderer_cuda_interop"):
        get_renderer_cuda_interop_interface.renderer_cuda_interop = acquire_renderer_cuda_interop_interface()
return get_renderer_cuda_interop_interface.renderer_cuda_interop
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/_renderer_cuda_interop.pyi

"""
This module contains bindings to C++ omni::kit::renderer::IRendererCudaInterop interface, core C++ part of Omniverse Kit.
>>> import omni.kit.renderer.cuda_interop
>>> e = omni.kit.renderer.cuda_interop.get_renderer_cuda_interop_interface()
"""
from __future__ import annotations
import omni.kit.renderer.cuda_interop._renderer_cuda_interop
import typing
__all__ = [
"IRendererCudaInterop",
"acquire_renderer_cuda_interop_interface",
"release_renderer_cuda_interop_interface"
]
class IRendererCudaInterop():
pass
def acquire_renderer_cuda_interop_interface(plugin_name: str = None, library_path: str = None) -> IRendererCudaInterop:
pass
def release_renderer_cuda_interop_interface(arg0: IRendererCudaInterop) -> None:
pass
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/tests/__init__.py

from .test_renderer_cuda_interop import *
omniverse-code/kit/exts/omni.kit.renderer.cuda_interop/omni/kit/renderer/cuda_interop/tests/test_renderer_cuda_interop.py

import inspect
import pathlib
import carb
import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test
import omni.kit.renderer.cuda_interop_test
class RendererCudaInteropTest(omni.kit.test.AsyncTestCase):
async def setUp(self):
self._settings = carb.settings.acquire_settings_interface()
self._app_window_factory = omni.appwindow.acquire_app_window_factory_interface()
self._renderer = omni.kit.renderer.bind.acquire_renderer_interface()
self._renderer_cuda_interop_test = omni.kit.renderer.cuda_interop_test.acquire_renderer_cuda_interop_test_interface()
self._renderer.startup()
self._renderer_cuda_interop_test.startup()
def __test_name(self) -> str:
return f"{self.__module__}.{self.__class__.__name__}.{inspect.stack()[2][3]}"
async def tearDown(self):
self._renderer_cuda_interop_test.shutdown()
self._renderer.shutdown()
self._renderer_cuda_interop_test = None
self._renderer = None
self._app_window_factory = None
self._settings = None
async def test_1_render_cuda_interop_test(self):
app_window = self._app_window_factory.create_window_from_settings()
app_window.startup_with_desc(
title="Renderer test OS window",
width=16,
height=16,
x=omni.appwindow.POSITION_CENTERED,
y=omni.appwindow.POSITION_CENTERED,
decorations=True,
resize=True,
always_on_top=False,
scale_to_monitor=False,
dpi_scale_override=-1.0
)
self._renderer.attach_app_window(app_window)
self._app_window_factory.set_default_window(app_window)
TEST_COLOR = (1, 2, 3, 255)
test_color_unit = tuple(c / 255.0 for c in TEST_COLOR)
self._renderer.set_clear_color(app_window, test_color_unit)
self._renderer_cuda_interop_test.startup_resources_for_app_window(app_window)
self._renderer_cuda_interop_test.setup_simple_comparison_for_app_window(app_window, TEST_COLOR[0], TEST_COLOR[1], TEST_COLOR[2], TEST_COLOR[3])
test_name = self.__test_name()
for _ in range(3):
await omni.kit.app.get_app().next_update_async()
self._renderer_cuda_interop_test.shutdown_resources_for_app_window(app_window)
self._app_window_factory.set_default_window(None)
self._renderer.detach_app_window(app_window)
app_window.shutdown()
app_window = None
omniverse-code/kit/exts/omni.kit.window.audioplayer/config/extension.toml

[package]
title = "Kit Audio Player Window"
category = "Audio"
version = "1.0.1"
description = "A simple audio player window"
detailedDescription = """This adds a window for playing audio assets.
This also adds an option to the content browser to play audio assets in this
audio player.
"""
preview_image = "data/preview.png"
authors = ["NVIDIA"]
keywords = ["audio", "playback"]
[dependencies]
"omni.audioplayer" = {}
"omni.ui" = {}
"omni.kit.window.content_browser" = { optional=true }
"omni.kit.window.filepicker" = {}
"omni.kit.menu.utils" = {}
[[python.module]]
name = "omni.kit.window.audioplayer"
[[test]]
unreliable = true
args = [
# Use the null device backend so we don't scare devs by playing audio.
"--/audio/deviceBackend=null",
# Needed for UI testing
"--/app/menu/legacy_mode=false",
]
dependencies = [
"omni.kit.mainwindow",
"omni.kit.ui_test",
]
stdoutFailPatterns.exclude = [
"*" # I don't want these but OmniUiTest forces me to use them
]
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/__init__.py

from .audio_player_window import *
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/audio_player_window.py

# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb.settings
import carb.dictionary
import omni.audioplayer
import omni.kit.ui
import omni.ui
import threading
import time
import re
import asyncio
import enum
from typing import Callable
from omni.kit.window.filepicker import FilePickerDialog
PERSISTENT_SETTINGS_PREFIX = "/persistent"
class EndReason(enum.Enum):
    # sound finished naturally
    FINISHED = 0
    # sound was explicitly stopped
    STOPPED = 1
    # seeked to a new location in the sound (causes an end callback)
    SEEK = 2
    # the previous sound ended because another one is being played
    QUEUED_NEW_SOUND = 3
class AudioPlayerWindowExtension(omni.ext.IExt):
"""Audio Player Window Extension"""
class FieldModel(omni.ui.AbstractValueModel):
def __init__(self, end_edit_callback):
super(AudioPlayerWindowExtension.FieldModel, self).__init__()
self._end_edit_callback = end_edit_callback
self._value = ""
def get_value_as_string(self):
return self._value
def begin_edit(self):
pass
def set_value(self, value):
self._value = value
self._value_changed()
def end_edit(self):
self._end_edit_callback(self._value)
class SliderModel(omni.ui.AbstractValueModel):
def __init__(self, update_callback, end_edit_callback):
super(AudioPlayerWindowExtension.SliderModel, self).__init__()
self._update_callback = update_callback
self._end_edit_callback = end_edit_callback
self._value = 0
def get_value_as_int(self): # pragma: no cover
return int(self._value)
def get_value_as_float(self): # pragma: no cover
return float(self._value)
def begin_edit(self): # pragma: no cover
pass
def set_value(self, value): # pragma: no cover
self._value = value
self._value_changed()
self._update_callback(self._value)
def end_edit(self): # pragma: no cover
self._end_edit_callback(self._value)
def _on_file_pick(self, dialog: FilePickerDialog, filename: str, dirname: str): # pragma: no cover
path = ""
if dirname:
path = f"{dirname}/{filename}"
elif filename:
path = filename
dialog.hide()
self._file_field.model.set_value(path)
# this has to be called manually because set_value doesn't do it
self._file_field_end_edit(path)
def _choose_file_clicked(self): # pragma: no cover
dialog = FilePickerDialog(
"Select File",
apply_button_label="Select",
click_apply_handler=lambda filename, dirname: self._on_file_pick(dialog, filename, dirname),
)
dialog.show()
def _set_pause_button(self): # pragma: no cover
self._play_button.set_style({"image_url": "resources/glyphs/timeline_pause.svg"})
def _set_play_button(self): # pragma: no cover
self._play_button.set_style({"image_url": "resources/glyphs/timeline_play.svg"})
def _timeline_str(self, time): # pragma: no cover
sec = ":{:02.0f}".format(time % 60)
if time > 60.0 * 60.0:
return "{:1.0f}".format(time // (60 * 60)) + ":{:02.0f}".format((time // 60) % 60) + sec
else:
return "{:1.0f}".format(time // 60) + sec
def _timeline_ticker(self): # pragma: no cover
if not self._playing:
return
time = self._player.get_play_cursor()
self._timeline_cursor_label.text = self._timeline_str(time)
self._timeline_slider.model.set_value(time * self._timeline_slider_scale)
# if the window was closed, stop the player
if not self._window.visible:
self._end_reason = EndReason.STOPPED
self._player.stop_sound()
        # Timer.start() returns None; keep the Timer object so on_shutdown can cancel it.
        self._ticker = threading.Timer(0.25, self._timeline_ticker)
        self._ticker.start()
def _loading_ticker(self):
labels = {0: "Loading", 1: "Loading.", 2: "Loading..", 3: "Loading..."}
if not self._loading:
self._loading_label.text = ""
return
self._loading_label.text = labels[self._loading_counter % 4]
self._loading_counter += 1
        # Timer.start() returns None; keep the Timer object itself.
        self._loading_timer = threading.Timer(0.25, self._loading_ticker)
        self._loading_timer.start()
def _play_sound(self, time):
self._loading = True
self._player.play_sound(
self._file_field.model.get_value_as_string(), time
)
def _close_error_window(self): # pragma: no cover
self._error_window.visible = False
def _set_play_cursor(self, time): # pragma: no cover
self._end_reason = EndReason.SEEK
self._player.set_play_cursor(time)
def _file_loaded(self, success): # pragma: no cover
self._loading = False
if not success:
self._playing = False
self._set_play_button()
error_text = "Loading failed"
file_name = self._file_field.model.get_value_as_string()
            if re.search(r"^.*\.(m4a|aac)$", file_name):
error_text = (
f"Failed to load file '{file_name}' codec not supported - only Vorbis, FLAC and WAVE are supported"
)
else:
error_text = f"Failed to load file '{file_name}' codec not supported (only Vorbis, FLAC, MP3 and WAVE are supported), file does not exist or the file is corrupted"
self._error_window = omni.ui.Window(
"Audio Player Error", width=400, height=0, flags=omni.ui.WINDOW_FLAGS_NO_DOCKING
)
with self._error_window.frame:
with omni.ui.VStack():
with omni.ui.HStack():
omni.ui.Spacer()
self._error_window_label = omni.ui.Label(
error_text, word_wrap=True, width=380, alignment=omni.ui.Alignment.CENTER
)
omni.ui.Spacer()
with omni.ui.HStack():
omni.ui.Spacer()
self._error_window_ok_button = omni.ui.Button(
width=64, height=32, clicked_fn=self._close_error_window, text="ok"
)
omni.ui.Spacer()
self._waveform_image_provider.set_bytes_data([0, 0, 0, 0], [1, 1])
return
if self._new_file:
width = 2048
height = 64
raw_image = self._player.draw_waveform(width, height, [0.89, 0.54, 0.14, 1.0], [0.0, 0.0, 0.0, 0.0])
self._waveform_image_provider.set_bytes_data(raw_image, [width, height])
self._new_file = False
self._timeline_end_label.text = self._timeline_str(self._player.get_sound_length())
self._sound_length = self._player.get_sound_length()
self._timeline_slider_scale = 1.0 / self._sound_length
# set the timeline ticker going
self._timeline_ticker()
# set this back to default
self._end_reason = EndReason.FINISHED
def _play_finished(self): # pragma: no cover
if self._end_reason != EndReason.SEEK and self._end_reason != EndReason.QUEUED_NEW_SOUND:
self._playing = False
# set the slider to finished
self._timeline_cursor_label.text = self._timeline_str(0)
self._timeline_slider.model.set_value(0.0)
if self._end_reason == EndReason.FINISHED or self._end_reason == EndReason.STOPPED:
self._set_play_button()
if self._end_reason == EndReason.FINISHED and self._settings.get_as_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop"):
self._window.visible = False
def _play_clicked(self): # pragma: no cover
if self._loading:
return
if self._playing:
if self._paused:
self._player.unpause_sound()
self._set_pause_button()
self._paused = False
else:
self._player.pause_sound()
self._set_play_button()
self._paused = True
return
self._playing = True
self._paused = False
self._load_result_label.text = ""
self._loading_ticker()
self._set_pause_button()
self._play_sound(self._timeline_slider.model.get_value_as_float() * self._sound_length)
def _file_field_end_edit(self, value):
self._loading = True
self._new_file = True
self._load_result_label.text = ""
self._loading_ticker()
self._stop_clicked()
self._player.load_sound(self._file_field.model.get_value_as_string())
def _stop_clicked(self): # pragma: no cover
if self._loading:
return
self._end_reason = EndReason.STOPPED
self._player.stop_sound()
self._playing = False
self._paused = False
def _slider_end_edit(self, value): # pragma: no cover
if self._loading:
return
if not self._playing:
return
self._set_play_cursor(value * self._sound_length)
def _slider_changed(self, value): # pragma: no cover
if not self._playing and not self._loading:
self._timeline_cursor_label.text = self._timeline_str(value * self._sound_length)
def open_window(self):
"""
Make the window become visible
Args:
No arguments
Returns:
No return value
"""
self._window.visible = True
def open_window_and_play(self, path): # pragma: no cover
"""
Make the window become visible then begin playing a file
Args:
path: The file to begin playing
Returns:
No return value
"""
self._playing = True
        self._loading = True
self._paused = False
self._new_file = True
self._window.visible = True
self._load_result_label.text = ""
self._loading_ticker()
self._set_pause_button()
self._end_reason = EndReason.QUEUED_NEW_SOUND
self._file_field.model.set_value(path)
self._play_sound(0.0)
def _menu_callback(self, a, b):
self._window.visible = not self._window.visible
def _on_menu_click(self, menu, value): # pragma: no cover
if self._content_window is None:
return
protocol = self._content_window.get_selected_icon_protocol()
path = self._content_window.get_selected_icon_path()
if not path.startswith(protocol):
path = protocol + path
self.open_window_and_play(path)
def _on_menu_check(self, url):
return not not re.search("^.*\\.(wav|wave|ogg|oga|flac|fla|mp3|m4a|spx|opus|adpcm)$", url)
def _on_browser_click(self, menu, value): # pragma: no cover
if self._content_browser is None:
return
# protocol = self._content_browser.get_selected_icon_protocol()
# path = self._content_browser.get_selected_icon_path()
# if not path.startswith(protocol):
# path = protocol + path
self.open_window_and_play(value)
def _on_content_browser_load(self): # pragma: no cover
import omni.kit.window.content_browser
self._content_browser = omni.kit.window.content_browser.get_content_window()
if self._content_browser is not None:
self._content_browser_entry = self._content_browser.add_context_menu(
"Play Audio", "audio_play.svg", self._on_browser_click, self._on_menu_check
)
def _on_content_browser_unload(self): # pragma: no cover
if self._content_browser is not None:
self._content_browser.delete_context_menu("Play Audio")
self._content_browser_entry = None
self._content_browser = None
def _on_player_event(self, event):
if event.type == int(omni.audioplayer.CallbackType.LOADED):
success = event.payload["success"]
self._file_loaded(success)
elif event.type == int(omni.audioplayer.CallbackType.ENDED):
self._play_finished()
else:
print("unrecognized type " + str(event.type))
def on_startup(self):
self._content_browser = None
self._hooks = []
manager = omni.kit.app.get_app().get_extension_manager()
# current content window
self._hooks.append(
manager.subscribe_to_extension_enable(
on_enable_fn=lambda _: self._on_content_browser_load(),
on_disable_fn=lambda _: self._on_content_browser_unload(),
ext_name="omni.kit.window.content_browser",
hook_name="omni.kit.window.audioplayer omni.kit.window.content_browser listener",
)
)
self._loading_counter = 0
self._ticker = None
self._loading = False
self._end_reason = EndReason.FINISHED
self._new_file = True
self._sound_length = 0
self._timeline_slider_scale = 0
self._file = ""
self._playing = False
self._paused = False
self._player = omni.audioplayer.create_audio_player()
self._sub = self._player.get_event_stream().create_subscription_to_pop(self._on_player_event)
self._window = omni.ui.Window("Audio Player", width=600, height=200)
self._settings = carb.settings.get_settings()
self._settings.set_default_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", False)
with self._window.frame:
with omni.ui.VStack(height=0, spacing=8):
# file dialogue
with omni.ui.HStack():
omni.ui.Button(
width=32,
height=32,
clicked_fn=self._choose_file_clicked,
style={"image_url": "resources/glyphs/folder.svg"},
)
self._file_field_model = AudioPlayerWindowExtension.FieldModel(self._file_field_end_edit)
self._file_field = omni.ui.StringField(self._file_field_model, height=32)
# timeline slider
with omni.ui.HStack(height=64):
self._timeline_cursor_label = omni.ui.Label("0:00", width=25)
omni.ui.Label(" / ", width=10)
self._timeline_end_label = omni.ui.Label("0:00", width=25)
self._timeline_slider_model = AudioPlayerWindowExtension.SliderModel(
self._slider_changed, self._slider_end_edit
)
with omni.ui.ZStack():
self._waveform_image_provider = omni.ui.ByteImageProvider()
self._waveform_image = omni.ui.ImageWithProvider(
self._waveform_image_provider,
width=omni.ui.Percent(100),
height=omni.ui.Percent(100),
fill_policy=omni.ui.IwpFillPolicy.IWP_STRETCH,
)
with omni.ui.VStack():
omni.ui.Spacer()
self._timeline_slider = omni.ui.FloatSlider(
self._timeline_slider_model,
height=0,
style={
"color": 0x00FFFFFF,
"background_color": 0x00000000,
"draw_mode": omni.ui.SliderDrawMode.HANDLE,
"font_size": 22,
},
)
omni.ui.Spacer()
# buttons
with omni.ui.HStack():
with omni.ui.ZStack():
omni.ui.Spacer()
self._load_result_label = omni.ui.Label(
"", alignment=omni.ui.Alignment.CENTER, style={"color": 0xFF0000FF}
)
self._play_button = omni.ui.Button(
width=32,
height=32,
clicked_fn=self._play_clicked,
style={"image_url": "resources/glyphs/timeline_play.svg"},
)
omni.ui.Button(
width=32,
height=32,
clicked_fn=self._stop_clicked,
style={"image_url": "resources/glyphs/timeline_stop.svg"},
)
with omni.ui.ZStack():
omni.ui.Spacer()
self._loading_label = omni.ui.Label("", alignment=omni.ui.Alignment.CENTER)
with omni.ui.HStack(alignment=omni.ui.Alignment.LEFT, width=100):
omni.ui.Label("Close on Stop", alignment=omni.ui.Alignment.LEFT)
omni.ui.Spacer()
self._auto_close_on_stop = omni.ui.CheckBox(alignment=omni.ui.Alignment.LEFT)
omni.ui.Spacer()
self._auto_close_on_stop.model.set_value(
self._settings.get_as_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop")
)
self._dict = carb.dictionary.get_dictionary()
self._auto_close_on_stop.model.add_value_changed_fn(
lambda a, b=self._settings: b.set_bool(
PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", a.get_value_as_bool()
)
)
def on_change(item, event_type): # pragma: no cover
self._auto_close_on_stop.model.set_value(self._dict.get(item))
self._subscription = self._settings.subscribe_to_node_change_events(
PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", on_change
)
# add a callback to open the window
# FIXME: disabled until the bugs are worked out
self._menuEntry = omni.kit.ui.get_editor_menu().add_item("Window/Audio Player", self._menu_callback)
self._window.visible = False
def on_shutdown(self): # pragma: no cover
self._end_reason = EndReason.STOPPED
self._player.stop_sound()
        if self._ticker is not None:
self._ticker.cancel()
self._settings.unsubscribe_to_change_events(self._subscription)
self._subscription = None
# run the unload function to avoid breaking the extension when it reloads
self._on_content_browser_unload()
# remove the subscription before the player to avoid events with a dead player
self._sub = None
self._player = None
self._window = None
self._menuEntry = None
self._content_window_entry = None
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/tests/test_audio_player.py

## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.app
import omni.kit.test
import omni.kit.ui_test
import omni.ui as ui
import omni.usd
import omni.timeline
import carb.tokens
import omni.usd.audio
from omni.ui.tests.test_base import OmniUiTest
import pathlib
import asyncio
class TestAudioPlayerWindow(OmniUiTest): # pragma: no cover
async def _dock_window(self):
await self.docked_test_window(
window=self._win.window,
width=600,
height=200)
def _dump_ui_tree(self, root):
print("DUMP UI TREE START")
#windows = omni.ui.Workspace.get_windows()
#children = [windows[0].frame]
children = [root.frame]
print(str(dir(root.frame)))
def recurse(children, path=""):
for c in children:
name = path + "/" + type(c).__name__
print(name)
if isinstance(c, omni.ui.ComboBox):
print(str(dir(c)))
recurse(omni.ui.Inspector.get_children(c), name)
recurse(children)
print("DUMP UI TREE END")
async def setUp(self):
await super().setUp()
extension_path = carb.tokens.get_tokens_interface().resolve("${omni.kit.window.audioplayer}")
self._test_path = pathlib.Path(extension_path).joinpath("data").joinpath("tests").absolute()
self._golden_img_dir = self._test_path.joinpath("golden")
# open the dropdown
window_menu = omni.kit.ui_test.get_menubar().find_menu("Window")
self.assertIsNotNone(window_menu)
await window_menu.click()
# click the audioplayer option to open it
player_menu = omni.kit.ui_test.get_menubar().find_menu("Audio Player")
self.assertIsNotNone(player_menu)
await player_menu.click()
self._win = omni.kit.ui_test.find("Audio Player")
self.assertIsNotNone(self._win)
self._file_name_textbox = self._win.find("**/StringField[*]")
self.assertIsNotNone(self._file_name_textbox)
async def _test_just_opened(self):
await self._dock_window()
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_just_opened.png")
async def _test_load_file(self):
await self._file_name_textbox.click()
await self._file_name_textbox.input(str(self._test_path / "1hz.oga"))
await asyncio.sleep(1.0)
# delete the text in the textbox so we'll have something constant
# for the image comparison
await self._file_name_textbox.double_click()
await omni.kit.ui_test.emulate_keyboard_press(carb.input.KeyboardInput.DEL)
await self._dock_window()
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_open_file.png")
async def test_all(self):
await self._test_just_opened()
await self._test_load_file()
self._dump_ui_tree(self._win.window)
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/geometry_properties.py

import os
import carb
import omni.ext
from functools import partial
from pathlib import Path
from pxr import Sdf, Usd, UsdGeom, UsdUI
from typing import Any, Callable
from omni.kit.property.usd.prim_selection_payload import PrimSelectionPayload
_extension_instance = None
TEST_DATA_PATH = ""
def get_instance():
global _extension_instance
return _extension_instance
class GeometryPropertyExtension(omni.ext.IExt):
def __init__(self):
self._registered = False
self._button_menu_entry = []
self._visual_property_widget = None
super().__init__()
def on_startup(self, ext_id):
global _extension_instance
_extension_instance = self
self._register_widget()
manager = omni.kit.app.get_app().get_extension_manager()
extension_path = manager.get_extension_path(ext_id)
global TEST_DATA_PATH
TEST_DATA_PATH = Path(extension_path).joinpath("data").joinpath("tests")
# +add menu item(s)
from omni.kit.property.usd import PrimPathWidget
context_menu = omni.kit.context_menu.get_instance()
if context_menu is None:
carb.log_error("context_menu is disabled!") # pragma: no cover
return None # pragma: no cover
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Instanceable",
show_fn=context_menu.is_prim_selected,
onclick_fn=self._click_toggle_instanceable,
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Wireframe Mode",
name_fn=partial(self._get_primvar_state, prim_name="wireframe", text_name=" Wireframe Mode"),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="wireframe"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Do Not Cast Shadows",
name_fn=partial(
self._get_primvar_state, prim_name="doNotCastShadows", text_name=" Do Not Cast Shadows"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="doNotCastShadows"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Enable Shadow Terminator Fix",
name_fn=partial(
self._get_primvar_state, prim_name="enableShadowTerminatorFix", text_name=" Enable Shadow Terminator Fix"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="enableShadowTerminatorFix"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Enable Fast Refraction Shadow",
name_fn=partial(
self._get_primvar_state, prim_name="enableFastRefractionShadow", text_name=" Enable Fast Refraction Shadow"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="enableFastRefractionShadow"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Disable RT SSS Transmission",
name_fn=partial(
self._get_primvar_state, prim_name="disableRtSssTransmission", text_name=" Disable RT SSS Transmission"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="disableRtSssTransmission"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Multimatted ID:",
name_fn=partial(
self._get_primvar_state, prim_name="multimatte_id", text_name=" ID for multimatte"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="multimatte_id"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Enable Holdout Object",
name_fn=partial(
self._get_primvar_state, prim_name="holdoutObject", text_name=" Enable Holdout Object"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="holdoutObject"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Invisible To Secondary Rays",
name_fn=partial(
self._get_primvar_state, prim_name="invisibleToSecondaryRays", text_name=" Invisible To Secondary Rays"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="invisibleToSecondaryRays"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Is Procedural Volume",
name_fn=partial(
self._get_primvar_state, prim_name="isVolume", text_name=" Is Volume"
),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="isVolume"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Matte Object",
name_fn=partial(self._get_primvar_state, prim_name="isMatteObject", text_name=" Matte Object"),
show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
onclick_fn=partial(self._click_set_primvar, prim_name="isMatteObject"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Hide From Camera",
name_fn=partial(
self._get_primvar_state, prim_name="hideForCamera", text_name=" Hide From Camera"
),
show_fn=[partial(context_menu.prim_is_type, type=UsdGeom.Boundable)],
onclick_fn=partial(self._click_set_primvar, prim_name="hideForCamera"),
)
)
self._button_menu_entry.append(
PrimPathWidget.add_button_menu_entry(
"Rendering/Toggle Is Light",
name_fn=partial(self._get_primvar_state, prim_name="isLight", text_name=" Is Light"),
show_fn=[partial(context_menu.prim_is_type, type=UsdGeom.Boundable)],
onclick_fn=partial(self._click_set_primvar, prim_name="isLight"),
)
)
def on_shutdown(self): # pragma: no cover
if self._registered:
self._unregister_widget()
# release menu item(s)
from omni.kit.property.usd import PrimPathWidget
for item in self._button_menu_entry:
PrimPathWidget.remove_button_menu_entry(item)
global _extension_instance
_extension_instance = None
def register_custom_visual_attribute(self,
attribute_name: str,
display_name: str,
type_name: str,
default_value: Any,
predicate: Callable[[Any], bool] = None):
"""
Add custom attribute with placeholder.
"""
if self._visual_property_widget:
self._visual_property_widget.add_custom_attribute(
attribute_name,
display_name,
type_name,
default_value,
predicate
)
def deregister_custom_visual_attribute(self, attribute_name: str):
if self._visual_property_widget:
self._visual_property_widget.remove_custom_attribute(attribute_name)
def _register_widget(self):
import omni.kit.window.property as p
from .prim_kind_widget import PrimKindWidget
from .prim_geometry_widget import GeometrySchemaAttributesWidget, ImageableSchemaAttributesWidget
w = p.get_window()
if w:
w.register_widget(
"prim",
"geometry",
GeometrySchemaAttributesWidget(
"Geometry",
UsdGeom.Xformable,
[
UsdGeom.BasisCurves,
UsdGeom.Capsule,
UsdGeom.Cone,
UsdGeom.Cube,
UsdGeom.Cylinder,
UsdGeom.HermiteCurves,
UsdGeom.Mesh,
UsdGeom.NurbsCurves,
UsdGeom.NurbsPatch,
UsdGeom.PointInstancer,
UsdGeom.Points,
UsdGeom.Subset,
UsdGeom.Sphere,
UsdGeom.Xform,
UsdGeom.Gprim,
UsdGeom.PointBased,
UsdGeom.Boundable,
UsdGeom.Curves,
UsdGeom.Imageable,
UsdGeom.PointBased,
UsdGeom.Subset,
UsdGeom.ModelAPI,
UsdGeom.MotionAPI,
UsdGeom.PrimvarsAPI,
UsdGeom.XformCommonAPI,
UsdGeom.ModelAPI,
UsdUI.Backdrop,
UsdUI.NodeGraphNodeAPI,
UsdUI.SceneGraphPrimAPI,
],
[
"proceduralMesh:parameterCheck",
"outputs:parameterCheck",
"refinementEnableOverride",
"refinementLevel",
"primvars:doNotCastShadows",
"primvars:enableShadowTerminatorFix",
"primvars:enableFastRefractionShadow",
"primvars:disableRtSssTransmission",
"primvars:holdoutObject",
"primvars:invisibleToSecondaryRays",
"primvars:isMatteObject",
"primvars:isVolume",
"primvars:multimatte_id",
"primvars:numSplits",
"primvars:endcaps",
UsdGeom.Tokens.proxyPrim,
],
[
"primvars:displayColor",
"primvars:displayOpacity",
"doubleSided",
"purpose",
"visibility",
"xformOpOrder",
],
),
)
self._visual_property_widget = ImageableSchemaAttributesWidget(
"Visual",
UsdGeom.Imageable,
[],
["primvars:displayColor", "primvars:displayOpacity", "doubleSided", "singleSided"],
[]
)
w.register_widget(
"prim",
"geometry_imageable",
self._visual_property_widget,
)
w.register_widget("prim", "kind", PrimKindWidget())
self._registered = True
def _unregister_widget(self): # pragma: no cover
import omni.kit.window.property as p
w = p.get_window()
if w:
w.unregister_widget("prim", "geometry")
w.unregister_widget("prim", "geometry_imageable")
w.unregister_widget("prim", "kind")
self._registered = False
def _click_set_primvar(self, payload: PrimSelectionPayload, prim_name: str):
stage = payload.get_stage()
if not stage:
return
omni.kit.commands.execute("TogglePrimVarCommand", prim_path=payload.get_paths(), prim_name=prim_name)
def _get_primvar_state(self, objects: dict, prim_name: str, text_prefix: str = "", text_name: str = "") -> str:
if not "stage" in objects or not "prim_list" in objects or not objects["stage"]:
return None
stage = objects["stage"]
primvar_state = []
for path in objects["prim_list"]:
prim = stage.GetPrimAtPath(path) if isinstance(path, Sdf.Path) else path
if prim:
primvars_api = UsdGeom.PrimvarsAPI(prim)
is_primvar = primvars_api.GetPrimvar(prim_name)
if is_primvar:
primvar_state.append(is_primvar.Get())
else:
primvar_state.append(False)
if primvar_state == [False] * len(primvar_state):
return f"{text_prefix}Set{text_name}"
elif primvar_state == [True] * len(primvar_state):
return f"{text_prefix}Clear{text_name}"
return f"{text_prefix}Toggle{text_name}"
def _click_toggle_instanceable(self, payload: PrimSelectionPayload):
omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=payload.get_paths())
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/prim_geometry_widget.py

# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.ui as ui
import omni.usd
from dataclasses import dataclass, field
from typing import Any, Callable, OrderedDict, List
from omni.kit.property.usd.usd_property_widget import MultiSchemaPropertiesWidget, UsdPropertyUiEntry
from omni.kit.property.usd.usd_property_widget import create_primspec_bool, create_primspec_int
from omni.kit.property.usd.custom_layout_helper import CustomLayoutFrame, CustomLayoutGroup, CustomLayoutProperty
from pxr import Kind, Sdf, Usd, UsdGeom
class GeometrySchemaAttributesWidget(MultiSchemaPropertiesWidget):
def __init__(self, title: str, schema, schema_subclasses: list, include_list: list = [], exclude_list: list = []):
"""
Constructor.
Args:
title (str): Title of the widgets on the Collapsable Frame.
schema: The USD IsA schema or applied API schema to filter attributes.
schema_subclasses (list): list of subclasses
include_list (list): list of additional schema named to add
exclude_list (list): list of additional schema named to remove
"""
super().__init__(title, schema, schema_subclasses, include_list, exclude_list)
# custom attributes
self.add_custom_schema_attribute("primvars:enableFastRefractionShadow", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:doNotCastShadows", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:enableShadowTerminatorFix", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(True))
self.add_custom_schema_attribute("primvars:holdoutObject", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:invisibleToSecondaryRays", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:isMatteObject", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:isVolume", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:multimatte_id", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_int(-1))
self.add_custom_schema_attribute("primvars:disableRtSssTransmission", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:numSplitsOverride", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("primvars:numSplits", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_int(2))
self.add_custom_schema_attribute("primvars:endcaps", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_int(1))
self.add_custom_schema_attribute("refinementEnableOverride", self._is_prim_refinement_level_supported, None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("refinementLevel", self._is_prim_refinement_level_supported, None, "", create_primspec_int(0))
def on_new_payload(self, payload):
"""
See PropertyWidget.on_new_payload
"""
self._add_curves = False
self._add_points = False
if not super().on_new_payload(payload):
return False
if not self._payload or len(self._payload) == 0:
return False
used = []
for prim_path in self._payload:
prim = self._get_prim(prim_path)
if not prim or not prim.IsA(self._schema):
return False
used += [attr for attr in prim.GetProperties() if attr.GetName() in self._schema_attr_names and not attr.IsHidden()]
if (prim.IsA(UsdGeom.BasisCurves)):
self._add_curves = True
if (prim.IsA(UsdGeom.Points)):
self._add_points = True
if self.is_custom_schema_attribute_used(prim):
used.append(None)
return used
def _is_prim_refinement_level_supported(self, prim):
return (
prim.IsA(UsdGeom.Mesh)
or prim.IsA(UsdGeom.Cylinder)
or prim.IsA(UsdGeom.Capsule)
or prim.IsA(UsdGeom.Cone)
or prim.IsA(UsdGeom.Sphere)
or prim.IsA(UsdGeom.Cube)
)
def _is_prim_single_sided_supported(self, prim):
return (
prim.IsA(UsdGeom.Mesh)
or prim.IsA(UsdGeom.Cylinder)
or prim.IsA(UsdGeom.Capsule)
or prim.IsA(UsdGeom.Cone)
or prim.IsA(UsdGeom.Sphere)
or prim.IsA(UsdGeom.Cube)
)
def _customize_props_layout(self, attrs):
self.add_custom_schema_attributes_to_props(attrs)
frame = CustomLayoutFrame(hide_extra=False)
with frame:
def update_bounds(stage, prim_paths):
timeline = omni.timeline.get_timeline_interface()
current_time = timeline.get_current_time()
current_time_code = Usd.TimeCode(
omni.usd.get_frame_time_code(current_time, stage.GetTimeCodesPerSecond())
)
for path in prim_paths:
prim = stage.GetPrimAtPath(path)
attr = prim.GetAttribute("extent") if prim else None
if prim and attr:
bounds = UsdGeom.Boundable.ComputeExtentFromPlugins(UsdGeom.Boundable(prim), current_time_code)
attr.Set(bounds)
def build_extent_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import UsdAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
def value_changed_func(model, widget):
val = model.get_value_as_string()
widget.set_tooltip(val)
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = UsdAttributeModel(stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata)
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
kwargs = {
"name": "models_readonly",
"model": model,
"enabled": False,
"tooltip": model.get_value_as_string(),
}
if additional_widget_kwargs:
kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.StringField(**kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
ui.Spacer(width=0)
with ui.VStack(width=8):
ui.Spacer()
ui.Image(
f"{ICON_PATH}/Default value.svg",
width=5.5,
height=5.5,
)
ui.Spacer()
model.add_value_changed_fn(lambda m, w=value_widget: value_changed_func(m,w))
return model
def build_size_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import UsdAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model_kwargs = UsdPropertiesWidgetBuilder._get_attr_value_range_kwargs(metadata)
model = UsdAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata, **model_kwargs
)
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
widget_kwargs = {"model": model}
widget_kwargs.update(UsdPropertiesWidgetBuilder._get_attr_value_soft_range_kwargs(metadata))
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = UsdPropertiesWidgetBuilder._create_drag_or_slider(ui.FloatDrag, ui.FloatSlider, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs)
model.add_value_changed_fn(lambda m, s=stage, p=prim_paths: update_bounds(s, p))
return model
def build_axis_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
                from omni.kit.property.usd.usd_attribute_model import TfTokenAttributeModel, UsdAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = None
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
tokens = metadata.get("allowedTokens")
if tokens is not None and len(tokens) > 0:
model = TfTokenAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
)
widget_kwargs = {"name": "choices"}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.ComboBox(model, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
else:
model = UsdAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
)
widget_kwargs = {"name": "models"}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.StringField(model, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(
model=model, value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs
)
model.add_item_changed_fn(lambda m, i, s=stage, p=prim_paths: update_bounds(s, p))
return model
def build_endcaps_func(
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={},
):
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import TfTokenAttributeModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.property.usd.widgets import ICON_PATH
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = None
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
class MyTfTokenAttributeModel(TfTokenAttributeModel):
allowed_tokens = ["open", "flat", "round"]
def _get_allowed_tokens(self, attr):
return self.allowed_tokens
def _get_value_from_index(self, value):
return value
def _update_value(self, force=False):
was_updating_value = self._updating_value
self._updating_value = True
if super(TfTokenAttributeModel, self)._update_value(force):
# TODO don't have to do this every time. Just needed when "allowedTokens" actually changed
self._update_allowed_token()
index = self._value if self._value < len(self._allowed_tokens) else -1
if index != -1 and self._current_index.as_int != index:
self._current_index.set_value(index)
self._item_changed(None)
self._updating_value = was_updating_value
model = MyTfTokenAttributeModel(
stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
)
widget_kwargs = {"name": "choices"}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
value_widget = ui.ComboBox(model, **widget_kwargs)
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(
model=model, value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs
)
return model
if self._add_curves:
with CustomLayoutGroup("Curve"):
CustomLayoutProperty("curveVertexCounts", "Per curve points")
CustomLayoutProperty("points", "Points")
CustomLayoutProperty("normals", "Normals")
CustomLayoutProperty("widths", "Widths")
CustomLayoutProperty("type", "Type")
CustomLayoutProperty("basis", "Basis")
CustomLayoutProperty("wrap", "Wrap")
CustomLayoutProperty("primvars:numSplitsOverride", "Number of BVH splits Override")
CustomLayoutProperty("primvars:numSplits", "Number of BVH splits")
CustomLayoutProperty("primvars:endcaps", "Endcaps", build_fn=build_endcaps_func)
if self._add_points:
with CustomLayoutGroup("Points"):
CustomLayoutProperty("points", "Points")
CustomLayoutProperty("normals", "Normals")
CustomLayoutProperty("widths", "Widths")
commonSectionName = "Mesh"
if self._add_curves or self._add_points:
commonSectionName = "Common"
with CustomLayoutGroup(commonSectionName):
CustomLayoutProperty("normals", "Normals")
CustomLayoutProperty("orientation", "Orientation")
CustomLayoutProperty("points", "Points")
CustomLayoutProperty("velocities", "Velocities")
CustomLayoutProperty("accelerations", "Accelerations")
CustomLayoutProperty("extent", "Extent", build_fn=build_extent_func)
CustomLayoutProperty("size", "Size", build_fn=build_size_func)
CustomLayoutProperty("radius", "Radius", build_fn=build_size_func)
CustomLayoutProperty("axis", "Axis", build_fn=build_axis_func)
CustomLayoutProperty("height", "Height", build_fn=build_size_func)
CustomLayoutProperty("polymesh:parameterCheck", "Parameter Check")
CustomLayoutProperty("primvars:doNotCastShadows", "Cast Shadows", build_fn=self._inverse_bool_builder)
CustomLayoutProperty("primvars:enableShadowTerminatorFix", "Shadow Terminator Fix")
CustomLayoutProperty("primvars:enableFastRefractionShadow", "Fast Refraction Shadow")
CustomLayoutProperty("primvars:disableRtSssTransmission", "Enable Rt SSS Transmission", build_fn=self._inverse_bool_builder)
CustomLayoutProperty("primvars:holdoutObject", "Holdout Object")
CustomLayoutProperty("primvars:invisibleToSecondaryRays", "Invisible To Secondary Rays")
CustomLayoutProperty("primvars:isMatteObject", "Matte Object")
CustomLayoutProperty("primvars:isVolme", "Is Volume")
CustomLayoutProperty("primvars:multimatte_id", "Multimatte ID")
with CustomLayoutGroup("Face"):
CustomLayoutProperty("faceVertexIndices", "Indices")
CustomLayoutProperty("faceVertexCounts", "Counts")
CustomLayoutProperty("faceVaryingLinearInterpolation", "Linear Interpolation")
CustomLayoutProperty("holeIndices", "Hole Indices")
with CustomLayoutGroup("Refinement"):
CustomLayoutProperty("refinementEnableOverride", "Refinement Override")
CustomLayoutProperty("refinementLevel", "Refinement Level")
CustomLayoutProperty("interpolateBoundary", "Interpolate Boundary")
CustomLayoutProperty("subdivisionScheme", "Subdivision Scheme")
CustomLayoutProperty("triangleSubdivisionRule", "Triangle SubdivisionRule")
with CustomLayoutGroup("Corner"):
CustomLayoutProperty("cornerIndices", "Indices")
CustomLayoutProperty("cornerSharpnesses", "Sharpnesses")
with CustomLayoutGroup("Crease"):
CustomLayoutProperty("creaseIndices", "Indices")
CustomLayoutProperty("creaseLengths", "Lengths")
CustomLayoutProperty("creaseSharpnesses", "Sharpnesses")
return frame.apply(attrs)
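    # Note on customization: every build_fn handed to CustomLayoutProperty above
    # follows the same builder signature (a sketch derived from the builders in
    # _customize_props_layout; the body here is illustrative):
    #
    #   def my_build_fn(stage, attr_name, metadata, property_type,
    #                   prim_paths: List[Sdf.Path],
    #                   additional_label_kwargs={}, additional_widget_kwargs={}):
    #       ...  # build a label plus a value widget, then return the widget's model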
def get_additional_kwargs(self, ui_prop: UsdPropertyUiEntry):
"""
Override this function if you want to supply additional arguments when building the label or ui widget.
"""
additional_widget_kwargs = None
if ui_prop.prop_name == "refinementLevel":
additional_widget_kwargs = {"min": 0, "max": 5}
return None, additional_widget_kwargs
def _inverse_bool_builder(self,
stage,
attr_name,
metadata,
property_type,
prim_paths: List[Sdf.Path],
additional_label_kwargs={},
additional_widget_kwargs={}
):
import carb.settings
from omni.kit.window.property.templates import HORIZONTAL_SPACING
from omni.kit.property.usd.usd_attribute_model import UsdAttributeInvertedModel
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
if not attr_name or not property_type:
return
with ui.HStack(spacing=HORIZONTAL_SPACING):
model = UsdAttributeInvertedModel(stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata)
settings = carb.settings.get_settings()
left_aligned = settings.get("ext/omni.kit.window.property/checkboxAlignment") == "left"
if not left_aligned:
if not additional_label_kwargs:
additional_label_kwargs = {}
additional_label_kwargs["width"] = 0
UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
if not left_aligned:
ui.Spacer(width=10)
ui.Line(style={"color": 0x338A8777}, width=ui.Fraction(1))
ui.Spacer(width=5)
with ui.VStack(width=10):
ui.Spacer()
widget_kwargs = {"width": 10, "height": 0, "name": "greenCheck", "model": model}
if additional_widget_kwargs:
widget_kwargs.update(additional_widget_kwargs)
with ui.ZStack():
with ui.Placer(offset_x=0, offset_y=-2):
value_widget = ui.CheckBox(**widget_kwargs)
with ui.Placer(offset_x=1, offset_y=-1):
mixed_overlay = ui.Rectangle(
height=8, width=8, name="mixed_overlay", alignment=ui.Alignment.CENTER, visible=False
)
ui.Spacer()
if left_aligned:
ui.Spacer(width=5)
ui.Line(style={"color": 0x338A8777}, width=ui.Fraction(1))
UsdPropertiesWidgetBuilder._create_control_state(value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs)
return model
@dataclass(frozen=True)
class CustomAttributeInfo:
schema_name: str
display_name: str
type_name: str
default_value: Any
predicate: Callable[[Any], bool] = None
def is_supported(self, prim):
return self.predicate is None or self.predicate(prim)
def get_metadata(self):
return {Sdf.PrimSpec.TypeNameKey: self.type_name, "customData": {"default": self.default_value}}
class ImageableSchemaAttributesWidget(MultiSchemaPropertiesWidget):
def __init__(self, title: str, schema, schema_subclasses: list, include_list: list = [], exclude_list: list = []):
"""
Constructor.
Args:
title (str): Title of the widgets on the Collapsable Frame.
schema: The USD IsA schema or applied API schema to filter attributes.
schema_subclasses (list): list of subclasses
            include_list (list): list of additional schema names to add
            exclude_list (list): list of additional schema names to remove
"""
super().__init__(title, schema, schema_subclasses, include_list, exclude_list)
self._custom_attributes: OrderedDict[str, CustomAttributeInfo] = OrderedDict()
self._custom_placeholders: List[str] = []
# custom attributes
self.add_custom_schema_attribute("singleSided", self._is_prim_single_sided_supported, None, "", create_primspec_bool(False))
def on_new_payload(self, payload):
"""
See PropertyWidget.on_new_payload
"""
self._custom_placeholders.clear()
if not super().on_new_payload(payload):
return False
if not self._payload or len(self._payload) == 0:
return False
used = []
for prim_path in self._payload:
prim = self._get_prim(prim_path)
if not prim or not prim.IsA(self._schema):
return False
used += [attr for attr in prim.GetProperties() if attr.GetName() in self._schema_attr_names and not attr.IsHidden()]
for schema_name, attr_info in self._custom_attributes.items():
if attr_info.is_supported(prim) and not prim.GetAttribute(schema_name):
self._custom_placeholders.append(schema_name)
used.append(None)
if self.is_custom_schema_attribute_used(prim):
used.append(None)
return used
def add_custom_attribute(self,
attribute_name,
display_name,
type_name="bool",
default_value=False,
predicate: Callable[[Any], bool] = None):
"""
Add custom attribute with placeholder.
"""
self._schema_attr_base.add(attribute_name)
self._custom_attributes.update(
{attribute_name: CustomAttributeInfo(attribute_name, display_name, type_name, default_value, predicate)}
)
self.request_rebuild()
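    # Example (illustrative sketch; the attribute name below is hypothetical):
    #
    #   widget.add_custom_attribute(
    #       "primvars:myCustomFlag", "My Custom Flag",
    #       type_name="bool", default_value=False,
    #       predicate=lambda prim: prim.IsA(UsdGeom.Mesh),
    #   )
    #
    # A placeholder row then appears for mesh prims that do not yet author the
    # attribute, using the metadata from CustomAttributeInfo.get_metadata().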
def remove_custom_attribute(self, attribute_name):
self._schema_attr_base.remove(attribute_name)
del self._custom_attributes[attribute_name]
self.request_rebuild()
def _is_prim_single_sided_supported(self, prim):
return (
prim.IsA(UsdGeom.Mesh)
or prim.IsA(UsdGeom.Cylinder)
or prim.IsA(UsdGeom.Capsule)
or prim.IsA(UsdGeom.Cone)
or prim.IsA(UsdGeom.Sphere)
or prim.IsA(UsdGeom.Cube)
)
def _customize_props_layout(self, attrs):
self.add_custom_schema_attributes_to_props(attrs)
for schema_name, attr_info in self._custom_attributes.items():
if schema_name in self._custom_placeholders:
attrs.append(
UsdPropertyUiEntry(
schema_name,
"",
attr_info.get_metadata(),
Usd.Attribute,
)
)
frame = CustomLayoutFrame(hide_extra=True)
with frame:
for schema_name, attr_info in self._custom_attributes.items():
CustomLayoutProperty(schema_name, attr_info.display_name)
# OMFP-1917: Most Visual settings under the Property tab don't work
# Hiding doubleSided, singleSided, primvars:displayColor, primvars:displayOpacity
CustomLayoutProperty("doubleSided", "Double Sided", hide_if_true=True)
CustomLayoutProperty("singleSided", "Single Sided", hide_if_true=True)
CustomLayoutProperty("purpose", "Purpose")
CustomLayoutProperty("visibility", "Visibility")
CustomLayoutProperty("primvars:displayColor", "Display Color")
CustomLayoutProperty("primvars:displayOpacity", "Display Opacity")
return frame.apply(attrs)
| 28,148 | Python | 49.627698 | 160 | 0.574996 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/__init__.py | from .geometry_properties import *
from .geometry_commands import *
| 68 | Python | 21.999993 | 34 | 0.794118 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/geometry_commands.py | import carb
import omni.kit.commands
from typing import List, Optional, Any
from pxr import Usd, Sdf, UsdGeom
class PrimVarCommand(omni.kit.commands.Command):
"""
Set primvar undoable **Command**.
Args:
prim_path (list): List of paths of prims.
prim_name (str): Primvar name.
prim_type (): Primvar variable type (EG. Sdf.ValueTypeNames.Bool)
value (any): New primvar value. If primvar doesn't exist, it will be created
"""
def __init__(
self,
prim_path: List[str],
prim_name: str,
prim_type: str,
value: Any,
usd_context_name: Optional[str] = "",
):
self._prim_path = prim_path
self._prim_name = prim_name
self._prim_type = prim_type
self._value = value
self._usd_context = omni.usd.get_context(usd_context_name)
self._undo_values = {}
def do(self):
stage = self._usd_context.get_stage()
for path in self._prim_path:
if path:
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
if value:
if value.GetTypeName() != self._prim_type:
carb.log_error(f"PrimVarCommand: cannot set value as {path}.{self._prim_name} is type {value.GetTypeName()} and expected type is {self._prim_type}")
else:
self._undo_values[str(path)] = value.Get()
value.Set(self._value)
else:
self._undo_values[str(path)] = None
primvars_api.CreatePrimvar(self._prim_name, self._prim_type).Set(self._value)
def undo(self):
stage = self._usd_context.get_stage()
for path in self._undo_values.keys():
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
orig_value = self._undo_values[path]
if orig_value:
value.Set(orig_value)
else:
primvars_api.RemovePrimvar(self._prim_name)
self._undo_values = {}
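# Example (illustrative sketch; the prim path and values are hypothetical):
#
#   import omni.kit.commands
#   from pxr import Sdf
#
#   omni.kit.commands.execute(
#       "PrimVarCommand",
#       prim_path=["/World/Cube"],
#       prim_name="test_int",
#       prim_type=Sdf.ValueTypeNames.Int,
#       value=123456,
#   )
#
# omni.kit.undo.undo() restores the previous primvar values, removing any
# primvars the command created.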
class TogglePrimVarCommand(omni.kit.commands.Command):
"""
Toggle primvar undoable **Command**.
Args:
prim_path (list): List of paths of prims.
prim_name (str): Primvar name.
"""
def __init__(
self,
prim_path: List[str],
prim_name: str,
usd_context_name: Optional[str] = "",
):
self._prim_path = prim_path
self._prim_name = prim_name
self._usd_context = omni.usd.get_context(usd_context_name)
self._undo_values = {}
def do(self):
stage = self._usd_context.get_stage()
for path in self._prim_path:
if path:
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
if value:
if value.GetTypeName() != Sdf.ValueTypeNames.Bool:
carb.log_error(f"TogglePrimVarCommand: cannot set value as {value.GetTypeName()} isn't a {self._prim_type}")
else:
self._undo_values[str(path)] = value.Get()
value.Set(not value.Get())
else:
self._undo_values[path] = None
primvars_api.CreatePrimvar(self._prim_name, Sdf.ValueTypeNames.Bool).Set(True)
def undo(self):
stage = self._usd_context.get_stage()
for path in self._undo_values.keys():
primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
value = primvars_api.GetPrimvar(self._prim_name)
orig_value = self._undo_values[path]
if orig_value:
value.Set(orig_value)
else:
primvars_api.RemovePrimvar(self._prim_name)
self._undo_values = {}
class ToggleInstanceableCommand(omni.kit.commands.Command):
"""
Toggle instanceable undoable **Command**.
Args:
prim_path (list): List of paths of prims.
"""
def __init__(
self,
prim_path: List[str],
usd_context_name: Optional[str] = "",
):
self._prim_path = prim_path
self._usd_context = omni.usd.get_context(usd_context_name)
self._undo_values = {}
def do(self):
stage = self._usd_context.get_stage()
for path in self._prim_path:
if path:
prim = stage.GetPrimAtPath(path)
value = prim.IsInstanceable()
self._undo_values[str(path)] = value
prim.SetInstanceable(not value)
def undo(self):
stage = self._usd_context.get_stage()
for path in self._undo_values.keys():
prim = stage.GetPrimAtPath(path)
value = self._undo_values[path]
prim.SetInstanceable(value)
self._undo_values = {}
| 5,046 | Python | 33.101351 | 173 | 0.550139 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/prim_kind_widget.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import omni.ui as ui
import omni.usd
from omni.kit.window.property.templates import SimplePropertyWidget, LABEL_WIDTH, LABEL_HEIGHT, HORIZONTAL_SPACING
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder, UsdPropertiesWidget
from omni.kit.property.usd.usd_object_model import MetadataObjectModel
from pxr import Kind, Usd, UsdGeom
class Constant:
def __setattr__(self, name, value):
raise Exception(f"Can't change Constant.{name}") # pragma: no cover
FONT_SIZE = 14.0
MIXED = "Mixed"
MIXED_COLOR = 0xFFCC9E61
class PrimKindWidget(UsdPropertiesWidget):
def __init__(self):
super().__init__(title="Kind", collapsed=False)
self._metadata_model = None
def on_new_payload(self, payload):
"""
See PropertyWidget.on_new_payload
"""
if not super().on_new_payload(payload): # pragma: no cover
return False # pragma: no cover
if len(self._payload) == 0:
return False
for prim_path in self._payload: # pragma: no cover
prim = self._get_prim(prim_path) # pragma: no cover
if not prim or not prim.IsA(UsdGeom.Imageable): # pragma: no cover
return False
return True
def reset(self):
super().reset()
if self._metadata_model:
self._metadata_model.clean()
self._metadata_model = None
def build_items(self):
super().build_items()
# get Kinds
all_kinds = Kind.Registry.GetAllKinds()
all_kinds.insert(0, "")
# http://graphics.pixar.com/usd/docs/USD-Glossary.html#USDGlossary-Kind
# "model" is considered an abstract type and should not be assigned as any prim's kind.
all_kinds.remove(Kind.Tokens.model)
kind = None
ambiguous = False
stage = self._payload.get_stage()
for path in self._payload:
prim = stage.GetPrimAtPath(path)
if prim:
prim_kind = Usd.ModelAPI(prim).GetKind()
                if kind is None:
kind = prim_kind
elif kind != prim_kind:
kind = "mixed"
if prim_kind not in all_kinds: # pragma: no cover
all_kinds.append(prim_kind) # pragma: no cover
carb.log_verbose(f"{path} has invalid Kind:{prim_kind}") # pragma: no cover
        if kind is None: # pragma: no cover
return # pragma: no cover
if self._filter.matches("Kind"):
self._any_item_visible = True
highlight = self._filter.name
with ui.HStack(spacing=HORIZONTAL_SPACING):
UsdPropertiesWidgetBuilder._create_label("Kind", {}, {"highlight": highlight})
with ui.ZStack():
self._metadata_model = MetadataObjectModel(
stage, [path for path in self._payload], False, {}, key="kind", default="", options=all_kinds
)
value_widget = ui.ComboBox(self._metadata_model, name="choices")
mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
UsdPropertiesWidgetBuilder._create_control_state(self._metadata_model, value_widget, mixed_overlay)
def _get_shared_properties_from_selected_prims(self, anchor_prim):
return None
def _get_prim(self, prim_path):
if prim_path:
stage = self._payload.get_stage()
if stage:
return stage.GetPrimAtPath(prim_path)
return None # pragma: no cover
| 4,198 | Python | 37.172727 | 117 | 0.596951 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_path_toggle.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.test
import os
import omni.kit.app
import omni.usd
from omni.kit.test.async_unittest import AsyncTestCase
from omni.kit import ui_test
from pxr import Gf
from omni.kit.test_suite.helpers import open_stage, get_test_data_path, select_prims, wait_stage_loading, arrange_windows
class PropertyPathAddMenu(AsyncTestCase):
# Before running each test
async def setUp(self):
await arrange_windows("Stage", 64)
await open_stage(get_test_data_path(__name__, "geometry_test.usda"))
# After running each test
async def tearDown(self):
await wait_stage_loading()
async def test_property_path_rendering(self):
await ui_test.find("Property").focus()
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
# select cube
await select_prims(["/World/Cube"])
await ui_test.human_delay()
# verify not set
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("primvars:wireframe")
self.assertFalse(attr.IsValid())
# click "Add"
add_widget = [w for w in ui_test.find_all("Property//Frame/**/Button[*].identifier==''") if w.widget.text.endswith("Add")][0]
await add_widget.click()
# select wireframe
await ui_test.select_context_menu("Rendering/Set Wireframe Mode")
# verify set
self.assertTrue(attr.IsValid())
self.assertTrue(attr.Get())
# undo
omni.kit.undo.undo()
# verify not set
self.assertFalse(attr.IsValid())
| 2,006 | Python | 33.016949 | 133 | 0.678465 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/__init__.py | from .test_geometry import *
from .test_commands import *
from .test_path_toggle import *
| 90 | Python | 21.749995 | 31 | 0.755556 |
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_commands.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import pathlib
import omni.kit.app
import omni.kit.commands
import omni.kit.test
import omni.ui as ui
from omni.ui.tests.test_base import OmniUiTest
from omni.kit.test_suite.helpers import open_stage, get_test_data_path
from omni.kit import ui_test
from pxr import Sdf
class TestCommandWidget(OmniUiTest):
# Before running each test
async def setUp(self):
await super().setUp()
await open_stage(get_test_data_path(__name__, "geometry_test.usda"))
# After running each test
async def tearDown(self):
await super().tearDown()
async def test_command_prim_var(self):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute('primvars:test_int')
self.assertFalse(attr.IsValid())
# create primvar as int
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Int, value=123456)
attr = prim.GetAttribute('primvars:test_int')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), 123456)
# try and change using bool
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Bool, value=True)
attr = prim.GetAttribute('primvars:test_int')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), 123456)
# change primvar
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Int, value=654321)
attr = prim.GetAttribute('primvars:test_int')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), 654321)
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo removed primvar
attr = prim.GetAttribute('primvars:test_int')
self.assertFalse(attr.IsValid())
# create primvar as bool
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Bool, value=True)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# try and change using int
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Int, value=123456)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# change primvar
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Bool, value=False)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), False)
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo removed primvar
attr = prim.GetAttribute('primvars:test_bool')
self.assertFalse(attr.IsValid())
async def test_command_toggle_prim_var(self):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute('primvars:test_bool')
self.assertFalse(attr.IsValid())
# create primvar as bool
omni.kit.commands.execute("TogglePrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool")
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# try and change using int
omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Int, value=123456)
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), True)
# change primvar
omni.kit.commands.execute("TogglePrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool")
attr = prim.GetAttribute('primvars:test_bool')
self.assertTrue(attr.IsValid())
self.assertEqual(attr.Get(), False)
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo removed primvar
attr = prim.GetAttribute('primvars:test_bool')
self.assertFalse(attr.IsValid())
async def test_command_toggle_instanceable(self):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
self.assertFalse(prim.IsInstanceable())
# toggle instanceable
omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=["/World/Cube"])
self.assertTrue(prim.IsInstanceable())
# toggle instanceable
omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=["/World/Cube"])
self.assertFalse(prim.IsInstanceable())
# undo
omni.kit.undo.undo()
omni.kit.undo.undo()
omni.kit.undo.undo()
# verify undo
self.assertFalse(prim.IsInstanceable())
| 5,718 | Python | 38.171233 | 149 | 0.659671 |
omniverse-code/kit/exts/omni.kit.property.geometry/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.2.2] - 2022-09-27
### Changes
- Changed primvars:numSplits* text
## [1.2.1] - 2022-05-13
### Changes
- Cleaned up ImageWithProvider vs Image usage
## [1.2.0] - 2021-05-31
### Added
- Added extent regeneration on size/radius/axis changes
## [1.1.0] - 2021-03-19
### Added
- Added soft range [0, 5] for refinementLevel.
## [1.0.7] - 2021-02-19
### Changes
- Added UI test
## [1.0.6] - 2020-12-09
### Changes
- Added extension icon
- Added readme
- Updated preview image
## [1.0.5] - 2020-11-20
### Changes
- Silenced unknown kind warning
## [1.0.4] - 2020-11-06
### Changes
- Update Kind to use metadata model
## [1.0.3] - 2020-10-27
### Changes
- Fixed spacing on kind widget
## [1.0.2] - 2020-10-22
### Changes
- Improved layout
## [1.0.1] - 2020-10-22
### Changes
- Moved schema into bundle
## [1.0.0] - 2020-10-05
### Changes
- Created
| 950 | Markdown | 16.611111 | 80 | 0.636842 |
omniverse-code/kit/exts/omni.kit.property.geometry/docs/README.md | # omni.kit.property.geometry
## Introduction
Property window extensions are for viewing and editing Usd Prim Attributes
## This extension supports editing of these Usd Types:
- UsdGeom.BasisCurves
- UsdGeom.Capsule
- UsdGeom.Cone
- UsdGeom.Cube
- UsdGeom.Cylinder
- UsdGeom.HermiteCurves
- UsdGeom.Mesh
- UsdGeom.NurbsCurves
- UsdGeom.NurbsPatch
- UsdGeom.PointInstancer
- UsdGeom.Points
- UsdGeom.Subset
- UsdGeom.Sphere
- UsdGeom.Xform
- UsdGeom.Gprim
- UsdGeom.PointBased
- UsdGeom.Boundable
- UsdGeom.Curves
- UsdGeom.Imageable
- UsdUI.Backdrop
### And supports editing of these Usd APIs:
- UsdGeom.ModelAPI
- UsdGeom.MotionAPI
- UsdGeom.PrimvarsAPI
- UsdGeom.XformCommonAPI
- UsdUI.NodeGraphNodeAPI
- UsdUI.SceneGraphPrimAPI
| 777 | Markdown | 17.975609 | 74 | 0.788932 |
omniverse-code/kit/exts/omni.kit.property.geometry/docs/index.rst | omni.kit.property.geometry
###########################
Property Geometry Values
.. toctree::
:maxdepth: 1
CHANGELOG
| 127 | reStructuredText | 9.666666 | 27 | 0.551181 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/style.py | from pathlib import Path
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("data").joinpath("icons")
UI_STYLE = {"Menu.Item.Icon::Display": {"image_url": f"{ICON_PATH}/viewport_visibility.svg"}}
| 253 | Python | 35.285709 | 94 | 0.727273 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/extension.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ViewportDisplayMenuBarExtension", "get_instance"]
from typing import Union
from omni.kit.viewport.menubar.core import BaseCategoryItem
from .display_menu_container import DEFAULT_SECTION, DisplayMenuContainer
import omni.ext
_extension_instance = None
def get_instance():
global _extension_instance
return _extension_instance
class ViewportDisplayMenuBarExtension(omni.ext.IExt):
"""The Entry Point for the Display Settings in Viewport Menu Bar"""
def on_startup(self, ext_id):
self._display_menu = DisplayMenuContainer()
global _extension_instance
_extension_instance = self
def on_shutdown(self):
self._display_menu.destroy()
self._display_menu = None
global _extension_instance
_extension_instance = None
def register_custom_setting(self, text: str, setting_path: str):
"""
Register custom display setting.
Args:
text (str): Text shown in menu item.
setting_path (str): Setting path for custom display setting (bool value).
"""
if self._display_menu:
self._display_menu.register_custom_setting(text, setting_path)
def deregister_custom_setting(self, text: str):
"""
Deregister custom display setting.
Args:
text (str): Text shown in menu item.
"""
if self._display_menu:
self._display_menu.deregister_custom_setting(text)
def register_custom_category_item(self, category: str, item: BaseCategoryItem, section: str = DEFAULT_SECTION):
"""
Register custom display setting in category.
Args:
category (str): Category to add menu item. Can be an existing category e.g. "Heads Up Display" or a new one.
item (item: BaseCategoryItem): Item to append.
section (str): Optional section to organise category, default no section.
"""
if self._display_menu:
self._display_menu.register_custom_category_item(category, item, section)
def deregister_custom_category_item(self, category: str, item: BaseCategoryItem):
"""
Deregister custom display setting in category.
Args:
category (str): Category to remove menu item. Can be an existing category e.g. "Heads Up Display" or a new one.
item (item: BaseCategoryItem): Item to remove.
"""
if self._display_menu:
self._display_menu.deregister_custom_category_item(category, item)
| 2,971 | Python | 36.15 | 123 | 0.672164 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/model.py | import omni.ui as ui
class DisplayLayerModel(ui.SimpleBoolModel):
def __init__(self, layer) -> None:
self._layer = layer
super().__init__()
def get_value_as_bool(self) -> bool:
return self._layer.visible
def set_value(self, visible: bool):
if visible != self._layer.visible:
self._layer.visible = visible
self._value_changed()
def begin_edit(self) -> None:
pass
def end_edit(self) -> None:
pass
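# DisplayLayerModel adapts any layer-like object that exposes a writable
# ``visible`` attribute to omni.ui widgets, e.g. (illustrative sketch):
#
#   model = DisplayLayerModel(layer)   # ``layer`` is supplied by the caller
#   ui.CheckBox(model)                 # toggling the box drives layer.visible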
| 493 | Python | 21.454544 | 44 | 0.56998 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/display_menu_container.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["DisplayMenuContainer"]
from omni.kit.viewport.menubar.core import (
IconMenuDelegate,
SettingModel,
ViewportMenuContainer,
CategoryMenuContainer,
SelectableMenuItem,
SimpleCategoryModel,
CategoryStateItem,
BaseCategoryItem,
CategoryCustomItem,
CategoryCollectionItem
)
from .style import UI_STYLE
import carb
import carb.settings
import omni.ui as ui
import omni.kit.app
import omni.usd
from functools import partial
from typing import Dict, List, Optional, Tuple
SHOW_BY_TYPE_EXCLUDE_LIST = "/exts/omni.kit.viewport.menubar.display/showByType/exclude_list"
HEADS_UP_CATEGORY_NAME = "Heads Up Display"
SHOW_BY_TYPE_CATEGORY_NAME = "Show By Type"
SHOW_BY_PURPOSE_CATEGORY_NAME = "Show By Purpose"
DEFAULT_CATEGORIES = [HEADS_UP_CATEGORY_NAME, SHOW_BY_TYPE_CATEGORY_NAME, SHOW_BY_PURPOSE_CATEGORY_NAME]
DEFAULT_SECTION = "default"
def _make_viewport_setting(viewport_api_id: str, setting: str):
return f"/persistent/app/viewport/{viewport_api_id}/{setting}/visible"
class DisplayMenuContainer(ViewportMenuContainer):
"""The menu with the visibility settings"""
def __init__(self):
super().__init__(
name="Display",
delegate=IconMenuDelegate("Display"),
visible_setting_path="/exts/omni.kit.viewport.menubar.display/visible",
order_setting_path="/exts/omni.kit.viewport.menubar.display/order",
style=UI_STYLE
)
self._root_menu: Optional[ui.Menu] = None
self._category_models: Dict[str, SimpleCategoryModel] = {}
        self._custom_settings: List[Tuple[str, str]] = []
self._custom_category_items: Dict[str, List[BaseCategoryItem]] = {}
self._section_categories: Dict[str, List[str]] = {}
self._section_categories[DEFAULT_SECTION] = DEFAULT_CATEGORIES[:] # Copy the default categories list
def destroy(self):
super().destroy()
def register_custom_setting(self, text: str, setting_path: str):
self._custom_settings.append((text, setting_path))
if self._root_menu:
self._root_menu.invalidate()
def deregister_custom_setting(self, text: str):
found = [item for item in self._custom_settings if item[0] == text]
if found:
for item in found:
self._custom_settings.remove(item)
if self._root_menu:
self._root_menu.invalidate()
def register_custom_category_item(self, category: str, item: BaseCategoryItem, section: str):
is_top_category = False
if category not in DEFAULT_CATEGORIES and category not in self._category_models:
if item.text == category and isinstance(item, CategoryCollectionItem):
self._category_models[category] = SimpleCategoryModel(category, root=item)
is_top_category = True
else:
self._category_models[category] = SimpleCategoryModel(category)
if category not in self._custom_category_items:
self._custom_category_items[category] = []
if section not in self._section_categories:
self._section_categories[section] = []
if not is_top_category:
self._custom_category_items[category].append(item)
if category not in self._section_categories[section]:
self._section_categories[section].append(category)
if self._root_menu:
self._root_menu.invalidate()
def deregister_custom_category_item(self, category: str, item: BaseCategoryItem):
if category in self._custom_category_items:
if item in self._custom_category_items[category]:
self._custom_category_items[category].remove(item)
if category not in DEFAULT_CATEGORIES:
if (item.text == category and isinstance(item, CategoryCollectionItem)) or len(self._custom_category_items[category]) == 0:
del self._category_models[category]
# Now clean up section
sections = list(self._section_categories.keys())
for section in sections:
if category in self._section_categories[section]:
self._section_categories[section].remove(category)
if len(self._section_categories[section]) == 0:
del self._section_categories[section]
if self._root_menu:
self._root_menu.invalidate()
def build_fn(self, viewport_context: dict):
self._root_menu = ui.Menu(self.name, delegate=self._delegate,
on_build_fn=partial(self._build_menu_items, viewport_context),
style=self._style)
def _build_menu_items(self, viewport_context: dict, *args, **kwargs):
viewport = viewport_context.get("viewport_api")
viewport_api_id: str = str(viewport.id)
settings = carb.settings.get_settings()
show_by_type_items: list[BaseCategoryItem] = [
CategoryStateItem("Cameras", setting_path=_make_viewport_setting(viewport_api_id, "scene/cameras")),
CategoryStateItem("Lights", setting_path=_make_viewport_setting(viewport_api_id, "scene/lights")),
CategoryStateItem("Skeletons", setting_path=_make_viewport_setting(viewport_api_id, "scene/skeletons")),
CategoryStateItem("Audio", setting_path=_make_viewport_setting(viewport_api_id, "scene/audio")),
]
if (exclude_list := settings.get(SHOW_BY_TYPE_EXCLUDE_LIST)):
show_by_type_items = [item for item in show_by_type_items if item.text not in exclude_list]
# 105.1: Support alternate label of memory (i.e. "Host Memory", "Process Memory", "Memory")
# Defaults to pre 105.1 label (Host Memory) when not specified
mem_label = settings.get("/exts/omni.kit.viewport.window/hud/hostMemory/label")
if mem_label is None:
mem_label = "Host"
default_category_models = {
HEADS_UP_CATEGORY_NAME: SimpleCategoryModel(
HEADS_UP_CATEGORY_NAME,
[
CategoryStateItem("FPS", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderFPS")),
CategoryStateItem("Device Memory", setting_path=_make_viewport_setting(viewport_api_id, "hud/deviceMemory")),
CategoryStateItem(f"{mem_label} Memory", setting_path=_make_viewport_setting(viewport_api_id, "hud/hostMemory")),
CategoryStateItem("Resolution", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderResolution")),
CategoryStateItem("Progress", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderProgress")),
]
),
SHOW_BY_TYPE_CATEGORY_NAME: SimpleCategoryModel(
SHOW_BY_TYPE_CATEGORY_NAME,
show_by_type_items
),
SHOW_BY_PURPOSE_CATEGORY_NAME: SimpleCategoryModel(
SHOW_BY_PURPOSE_CATEGORY_NAME,
[
CategoryStateItem("Guide", setting_path="/persistent/app/hydra/displayPurpose/guide"),
CategoryStateItem("Proxy", setting_path="/persistent/app/hydra/displayPurpose/proxy"),
CategoryStateItem("Render", setting_path="/persistent/app/hydra/displayPurpose/render"),
]
)
}
self._category_models.update(default_category_models)
# XXX: These add_item calls currently must occur to add the separator!
self._category_models[SHOW_BY_TYPE_CATEGORY_NAME].add_item(CategoryCustomItem(
"Meshes",
lambda: SelectableMenuItem("Meshes",
SettingModel(setting_path=_make_viewport_setting(viewport_api_id, "scene/meshes")))
))
self._category_models[HEADS_UP_CATEGORY_NAME].add_item(CategoryCustomItem(
"Camera Speed",
lambda: SelectableMenuItem("Camera Speed",
SettingModel(_make_viewport_setting(viewport_api_id, "hud/cameraSpeed")))
))
identifier = "omni.kit.viewport.menubar.display"
# Create default section categories first
for name in self._section_categories[DEFAULT_SECTION]:
model = self._category_models[name]
if name in self._custom_category_items:
for item in self._custom_category_items[name]:
model.add_item(item)
# XXX: Workaround nested creation of these items not being able to trigger an action!
trigger_fns = None
if name == SHOW_BY_TYPE_CATEGORY_NAME:
icon_click_id = f"{identifier}.{name}.{name}" # Left-most check/mixed icon was toggled
trigger_fns = {
"Cameras": partial(self.__trigger_action, "toggle_camera_visibility", viewport_api=viewport),
"Lights": partial(self.__trigger_action, "toggle_light_visibility", viewport_api=viewport),
"Skeletons": partial(self.__trigger_action, "toggle_skeleton_visibility", viewport_api=viewport),
"Audio": partial(self.__trigger_action, "toggle_audio_visibility", viewport_api=viewport),
"Meshes": partial(self.__trigger_action, "toggle_mesh_visibility", viewport_api=viewport),
icon_click_id: partial(self.__trigger_action, "toggle_show_by_type_visibility", viewport_api=viewport),
}
CategoryMenuContainer(model, identifier=f"{identifier}.{name}", trigger_fns=trigger_fns)
# Now iterate named sections, with a separator for each.
for section, categories in self._section_categories.items():
            if section == DEFAULT_SECTION:
continue
ui.Separator(text=section)
for name in categories:
model = self._category_models[name]
if name in self._custom_category_items:
for item in self._custom_category_items[name]:
model.add_item(item)
CategoryMenuContainer(model, identifier=f"{identifier}.{name}")
ui.Separator()
# This currently is just easier tied to legacy global setting
SelectableMenuItem("Selection Outline", SettingModel(_make_viewport_setting(viewport_api_id, "guide/selection")),
triggered_fn=partial(self.__trigger_action, "toggle_selection_hilight_visibility", viewport_api=viewport),
trigger_will_set_model=True
)
SelectableMenuItem("Axis", SettingModel(_make_viewport_setting(viewport_api_id, "guide/axis")),
triggered_fn=partial(self.__trigger_action, "toggle_axis_visibility", viewport_api=viewport),
trigger_will_set_model=True
)
SelectableMenuItem("Grid", SettingModel(_make_viewport_setting(viewport_api_id, "guide/grid")),
triggered_fn=partial(self.__trigger_action, "toggle_grid_visibility", viewport_api=viewport),
trigger_will_set_model=True
)
# Custom display settings
if self._custom_settings:
ui.Separator()
for (text, setting_path) in self._custom_settings:
SelectableMenuItem(text, SettingModel(setting_path))
def __trigger_action(self, action: str, *args, **kwargs):
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
if action_registry:
exc_action = action_registry.get_action("omni.kit.viewport.actions", action)
if exc_action:
exc_action.execute(*args, **kwargs)
else:
carb.log_error(f"Could not find action to run: '{action}'")
else:
carb.log_error(f"Could not get action_registry to run '{action}")
| 12,371 | Python | 47.140078 | 135 | 0.629941 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/tests/test_ui.py | import omni.kit.test
from re import I
from omni.ui.tests.test_base import OmniUiTest
import omni.kit.ui_test as ui_test
from omni.kit.ui_test import Vec2
import omni.usd
import omni.kit.app
from pathlib import Path
import carb.input
import asyncio
import omni.ui as ui
from omni.kit.viewport.menubar.core import CategoryCollectionItem, CategoryStateItem, CategoryCustomItem, ViewportMenuDelegate, SelectableMenuItem
CURRENT_PATH = Path(__file__).parent
TEST_DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.parent.joinpath("data").joinpath("tests")
TEST_WIDTH, TEST_HEIGHT = 600, 400
TEST_SETTING_TRUE = "/exts/test/setting/true"
TEST_SETTING_FALSE = "/exts/test/setting/false"
class TestSettingMenuWindow(OmniUiTest):
async def setUp(self):
self._golden_img_dir = TEST_DATA_PATH.absolute().joinpath("golden_img").absolute()
await self.create_test_area(width=TEST_WIDTH, height=TEST_HEIGHT)
await omni.kit.app.get_app().next_update_async()
async def test_general(self):
await self._show_display_menu("menubar_display.png", None)
async def test_heads_up(self):
await self._show_display_menu("menubar_display_headsup.png", 86)
async def test_show_by_type(self):
await self._show_display_menu("menubar_display_show_type.png", 106)
async def test_show_by_purpose(self):
await self._show_display_menu("menubar_display_show_purpose.png", 126)
async def test_show_custom_menu_item(self):
inst = omni.kit.viewport.menubar.display.get_instance()
custom_collection_item = CategoryCollectionItem(
"Custom catetory",
[
CategoryStateItem("Custom Item", ui.SimpleBoolModel(True)),
]
)
inst.register_custom_category_item("Show By Type", custom_collection_item)
def _build_menu():
with ui.Menu("Physics", delegate=ViewportMenuDelegate()):
SelectableMenuItem("Joints", ui.SimpleBoolModel(True))
with ui.Menu("Colliders", delegate=ViewportMenuDelegate()):
SelectableMenuItem("None", ui.SimpleBoolModel(True))
SelectableMenuItem("Selected", ui.SimpleBoolModel(False))
SelectableMenuItem("All", ui.SimpleBoolModel(False))
ui.Separator()
SelectableMenuItem("Normals", ui.SimpleBoolModel(False))
physics_item = CategoryCustomItem("Physics", _build_menu)
inst.register_custom_category_item("Show By Type", physics_item)
settings = carb.settings.get_settings()
settings.set(TEST_SETTING_FALSE, False)
settings.set(TEST_SETTING_TRUE, True)
inst.register_custom_setting("test new setting (True)", TEST_SETTING_TRUE)
inst.register_custom_setting("test new setting (False)", TEST_SETTING_FALSE)
await omni.kit.app.get_app().next_update_async()
await self._show_display_menu("menubar_display_custom.png", 106)
inst.deregister_custom_category_item("Show By Type", custom_collection_item)
inst.deregister_custom_category_item("Show By Type", physics_item)
inst.deregister_custom_setting("test new setting (True)")
inst.deregister_custom_setting("test new setting (False)")
await omni.kit.app.get_app().next_update_async()
async def test_show_custom_category_and_section(self):
inst = omni.kit.viewport.menubar.display.get_instance()
category = "Draw Overlay"
section = "Selection Display"
did_shown_changed_callback = False
def on_shown(s):
print("on_shown: {s}")
nonlocal did_shown_changed_callback
did_shown_changed_callback = True
overlay_item = CategoryCollectionItem(
category,
[
CategoryCustomItem("Points", lambda: SelectableMenuItem("Points", model=ui.SimpleBoolModel())),
CategoryCustomItem("Normals", lambda: SelectableMenuItem("Normals", model=ui.SimpleBoolModel()))
],
shown_changed_fn=on_shown
)
inst.register_custom_category_item(category, overlay_item, section)
await omni.kit.app.get_app().next_update_async()
await self._show_display_menu("menubar_display_custom_category_and_section.png", 166)
self.assertTrue(did_shown_changed_callback)
inst.deregister_custom_category_item(category, overlay_item)
await omni.kit.app.get_app().next_update_async()
async def _show_display_menu(self, golden_img_name: str, y: int = None) -> None:
# Enable mouse input
app_window = omni.appwindow.get_default_app_window()
for device in [carb.input.DeviceType.MOUSE]:
app_window.set_input_blocking_state(device, None)
try:
await ui_test.emulate_mouse_move(Vec2(20, 46), human_delay_speed=4)
await ui_test.emulate_mouse_click()
if y is not None:
await ui_test.emulate_mouse_move(Vec2(20, y))
await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name=golden_img_name)
finally:
for i in range(3):
await omni.kit.app.get_app().next_update_async()
await ui_test.emulate_mouse_move(Vec2(300, 26))
await ui_test.emulate_mouse_click()
for i in range(3):
await omni.kit.app.get_app().next_update_async()
| 5,475 | Python | 39.865671 | 146 | 0.652603 |
omniverse-code/kit/exts/omni.kit.usdz_export/config/extension.toml | [package]
title = "USDZ Exporter"
description = "Packages assets into a USDZ archive."
authors = ["NVIDIA"]
version = "1.0.1"
changelog="docs/CHANGELOG.md"
preview_image = "data/preview.png"
readme = "docs/README.md"
#icon = "data/icon.png"
category = "Internal"
feature = true
[[python.module]]
name = "omni.kit.usdz_export"
[dependencies]
"omni.kit.pip_archive" = {}
"omni.ui" = {}
"omni.usd" = {}
"omni.usd.libs" = {}
"omni.kit.tool.collect" = {}
"omni.kit.window.file_exporter" = {}
# Additional python module with tests, to make them discoverable by test system.
[[python.module]]
name = "omni.kit.usdz_export.tests"
[[test]]
args = [
"--/app/asyncRendering=false",
"--/rtx/materialDb/syncLoads=true",
"--/omni.kit.plugin/syncUsdLoads=true",
"--/rtx/hydra/materialSyncLoads=true"
]
dependencies = [
"omni.kit.material.library",
] | 861 | TOML | 21.102564 | 80 | 0.671312 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/extension_usdz.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .layers_menu import layers_available
from .layers_menu import LayersMenu
import omni.ext
import omni.kit.app
class UsdzExportExtension(omni.ext.IExt):
def on_startup(self, ext_id):
# Setup a callback for the event
app = omni.kit.app.get_app_interface()
ext_manager = app.get_extension_manager()
self.__extensions_subscription = ext_manager.get_change_event_stream().create_subscription_to_pop(
self._on_event, name="omni.kit.usdz_export"
)
self.__layers_menu = None
self._on_event(None)
def _on_event(self, event):
# Create/destroy the menu in the Layers window
if self.__layers_menu:
if not layers_available():
self.__layers_menu.destroy()
self.__layers_menu = None
else:
if layers_available():
self.__layers_menu = LayersMenu()
def on_shutdown(self):
self.__extensions_subscription = None
if self.__layers_menu:
self.__layers_menu.destroy()
self.__layers_menu = None
| 1,530 | Python | 33.795454 | 106 | 0.65817 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/layers_menu.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .utils import is_extension_loaded, copy, list_folder_async
from pxr import Sdf, Usd
from pathlib import Path
from zipfile import ZipFile
from functools import partial
from typing import Callable, List
from omni.kit.window.file_exporter import get_file_exporter
from omni.kit.widget.prompt import PromptManager
import carb
import omni.kit.tool.collect as collect
import omni.usd
import asyncio
import tempfile
import os
import shutil
import omni.kit.app
import omni.kit.notification_manager as nm
def layers_available() -> bool:
"""Returns True if the extension "omni.kit.widget.layers" is loaded"""
return is_extension_loaded("omni.kit.widget.layers")
async def usdz_export(identifier, export_path):
try:
target_out = export_path
carb.log_info(f"Starting to export layer '{identifier}' to '{target_out}'")
prompt = PromptManager.post_simple_prompt("Please Wait", "Exporting to USDZ...", ok_button_info=None, modal=True)
# Waits for prompt to be shown
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
layer = Sdf.Layer.FindOrOpen(identifier)
if not layer:
message = f"Failed to export layer {identifier} as it does not exist."
carb.log_error(message)
nm.post_notification(message, status=nm.NotificationStatus.WARNING)
return
with tempfile.TemporaryDirectory() as tmp_path:
tmp_path = Path(tmp_path)
collect_path = tmp_path.joinpath("collected")
split_ext = os.path.splitext(identifier)
# Can't collect USDZ files because MDLs can't be resolved
if (split_ext[1] == '.usdz'):
input_usdz_temp_path = str(tmp_path.joinpath('temp_copy.usdz'))
await copy(identifier, str(input_usdz_temp_path))
with ZipFile(input_usdz_temp_path, 'r') as zip_ref:
zip_ref.extractall(str(tmp_path))
tmp_file_path = str(tmp_path.joinpath("main.usdc"))
layer.Export(tmp_file_path)
entry_layer_to_collect = tmp_file_path
elif not omni.usd.is_usd_writable_filetype(identifier) or identifier.startswith('anon'):
tmp_file_path = str(tmp_path.joinpath("main.usdc"))
layer.Export(tmp_file_path)
entry_layer_to_collect = tmp_file_path
else:
entry_layer_to_collect = identifier
collector = collect.Collector(entry_layer_to_collect, str(collect_path), flat_collection=True)
await collector.collect(None, None)
# must create USDZ locally because the UsdUtils package cannot handle omniverse:// URIs
absolute_paths, relative_paths = await list_folder_async(str(collect_path))
local_out_path = collect_path.joinpath("local_out.usdz")
# Create usdz package manually without using USD API as it cannot handle UDIM textures.
zip_writer = Usd.ZipFileWriter.CreateNew(str(local_out_path))
with zip_writer:
for absolute_path, relative_path in zip(absolute_paths, relative_paths):
url = omni.client.break_url(absolute_path)
absolute_path = url.path
# FIXME: omni.client will return windows path prefixed with '/'
if os.name == "nt" and absolute_path[0] == '/':
absolute_path = absolute_path[1:]
zip_writer.AddFile(absolute_path, relative_path)
await copy(str(local_out_path), target_out)
layer = None
zip_writer = None
finally:
prompt.visible = False
prompt = None
carb.log_info(f"Finished exporting layer '{identifier}' to '{target_out}'")
def export(objects):
"""Export the target layer to USDZ"""
def on_export(callback: Callable, flatten: bool, filename: str, dirname: str, extension: str = '', selections: List[str] = []):
nonlocal objects
path = f"{dirname}/{filename}{extension}"
item = objects["item"]
identifier = item().identifier
asyncio.ensure_future(usdz_export(identifier, path))
file_picker = get_file_exporter()
file_picker.show_window(
title="Export To USDZ",
export_button_label="Export",
export_handler=partial(on_export, None, False),
file_extension_types=[(".usdz", "Zipped package")]
)
class LayersMenu:
"""
When this object is alive, Layers 2.0 has an additional action
for exporting the layer to USDZ.
"""
def __init__(self):
import omni.kit.widget.layers as layers
self.__menu_subscription = layers.ContextMenu.add_menu(
[
{"name": ""},
{
"name": "Export USDZ",
"glyph": "menu_rename.svg",
"show_fn": [
layers.ContextMenu.is_layer_item,
layers.ContextMenu.is_not_missing_layer,
layers.ContextMenu.is_layer_not_locked_by_other,
layers.ContextMenu.is_layer_and_parent_unmuted
],
"onclick_fn": export,
}
]
)
def destroy(self):
"""Remove the menu from Layers 2.0"""
self.__menu_subscription = None
| 5,903 | Python | 38.891892 | 131 | 0.614942 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/utils.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import omni.kit.app
import traceback
import carb
import omni.client
import omni.client.utils as clientutils
def is_extension_loaded(extension_name: str) -> bool:
    """
    Returns True if the extension with the given name is loaded.
    """
    def is_ext(id: str, extension_name: str) -> bool:
        id_name = id.split("-")[0]
        return id_name == extension_name
    app = omni.kit.app.get_app_interface()
    ext_manager = app.get_extension_manager()
    extensions = ext_manager.get_extensions()
    loaded = next((ext for ext in extensions if is_ext(ext["id"], extension_name) and ext["enabled"]), None)
    return loaded is not None
async def copy(src_path: str, dest_path: str):
carb.log_info(f"Copying from {src_path} to {dest_path}...")
try:
result = await omni.client.copy_async(src_path, dest_path, omni.client.CopyBehavior.OVERWRITE)
if result != omni.client.Result.OK:
carb.log_error(f"Cannot copy from {src_path} to {dest_path}, error code: {result}.")
return False
else:
return True
except Exception as e:
traceback.print_exc()
carb.log_error(str(e))
return False
async def list_folder_async(folder_path):
def compute_absolute_path(base_path, is_base_path_folder, path, is_path_folder):
if is_base_path_folder and not base_path.endswith("/"):
base_path += "/"
if is_path_folder and not path.endswith("/"):
path += "/"
return clientutils.make_absolute_url_if_possible(base_path, path)
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix) :]
return text
absolute_paths = []
relative_paths = []
result, entry = await omni.client.stat_async(folder_path)
if result == omni.client.Result.OK and entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
is_folder = True
else:
is_folder = False
folder_path = clientutils.make_file_url_if_possible(folder_path)
if not is_folder:
absolute_paths = [folder_path]
relative_paths = [os.path.basename(folder_path)]
else:
if not folder_path.endswith("/"):
folder_path += "/"
folder_queue = [folder_path]
while len(folder_queue) > 0:
folder = folder_queue.pop(0)
(result, entries) = await omni.client.list_async(folder)
if result != omni.client.Result.OK:
break
folders = set((e.relative_path for e in entries if e.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN))
for f in folders:
folder_queue.append(compute_absolute_path(folder, True, f, False))
files = set((e.relative_path for e in entries if not e.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN))
for file in files:
absolute_path = compute_absolute_path(folder, True, file, False)
absolute_paths.append(absolute_path)
relative_path = remove_prefix(absolute_path, folder_path[:-1])
relative_path = relative_path.replace("\\", "/")
if relative_path != "/" and relative_path.startswith("/"):
relative_path = relative_path[1:]
if len(relative_path) > 0:
relative_paths.append(relative_path)
return absolute_paths, relative_paths
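# Illustrative example (paths are hypothetical): for a folder containing
# "scene.usd" and "textures/wood.png",
#
#   absolute_paths, relative_paths = await list_folder_async("omniverse://server/project")
#
# yields relative paths like ["scene.usd", "textures/wood.png"], each pairing
# with the absolute URL at the same index.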
| 3,859 | Python | 36.115384 | 116 | 0.63177 |
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/tests/usdz_export_test.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
from pxr import Usd
from pxr import UsdGeom
import carb
import omni.client
import omni.kit
import omni.usd
import os
import time
import unittest
from omni.kit.usdz_export import usdz_export
OMNI_SERVER = "omniverse://ov-test"
class TestUsdzExport(OmniUiTest):
def get_test_dir(self):
token = carb.tokens.get_tokens_interface()
data_dir = token.resolve("${data}")
return f"{data_dir}"
async def test_export_usdz_file(self):
usdz_size = 2600000
usdz_size_tc = 2675966
current_path = Path(__file__)
test_data_path = current_path.parent.parent.parent.parent.parent.joinpath("data")
test_stage_path = str(test_data_path.joinpath("test_stage").joinpath("scene.usd"))
test_dir = self.get_test_dir()
        export_file_path = Path(test_dir).joinpath("out.usdz").resolve()
        await usdz_export(test_stage_path, str(export_file_path))
        self.assertTrue(os.path.isfile(str(export_file_path)), 'out.usdz does not exist')
        size = os.stat(export_file_path).st_size
        self.assertTrue(usdz_size <= size <= usdz_size_tc, f'File size mismatch, expected between {usdz_size} and {usdz_size_tc} but got {size}')
| 1,702 | Python | 36.844444 | 127 | 0.706228 |
omniverse-code/kit/exts/omni.kit.usdz_export/docs/CHANGELOG.md | # Changelog
## [1.0.1] - 2022-11-08
- Add "omni.kit.window.file_exporter" as dependency.
## [1.0.0] - 2022-08-18
- Initial extension.
| 137 | Markdown | 14.333332 | 52 | 0.635036 |
omniverse-code/kit/exts/omni.kit.usdz_export/docs/README.md | # USDZ Exporter [omni.kit.usdz_export]
Exports selected layer to a USDZ archive.
| 83 | Markdown | 15.799997 | 41 | 0.759036 |
omniverse-code/kit/fabric/include/carb/flatcache/IToken.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Interface.h>
#ifndef __CUDACC__
// InterfaceUtils.h provides carb::getCachedInterface and is not CUDA-compatible
#include <carb/InterfaceUtils.h>
#endif // __CUDACC__
// Set to empty macro when IToken::iToken static member is removed
#define FLATCACHE_ITOKEN_INIT \
const carb::flatcache::IToken* carb::flatcache::iToken = nullptr;
namespace carb
{
namespace flatcache
{
// TokenC are integer keys that identify tokens (interned strings) in C-ABI interfaces
struct TokenC
{
uint64_t token;
// Note that in the name comparisons below we mask off USD's lifetime bit.
// For example, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
constexpr bool operator<(const TokenC& other) const
{
return (token & ~1) < (other.token & ~1);
}
constexpr bool operator==(const TokenC& other) const
{
return (token & ~1) == (other.token & ~1);
}
constexpr bool operator!=(const TokenC& other) const
{
return (token & ~1) != (other.token & ~1);
}
};
static_assert(std::is_standard_layout<TokenC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");
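// For example (illustrative, with made-up handle values): two TokenC that
// differ only in the low mortality bit compare equal:
//
//   TokenC mortal{ 0x1234 | 1 };
//   TokenC immortal{ 0x1234 };
//   assert(mortal == immortal);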
// We don't reference count the uninitialized (or empty) token, and we use
// this fact to avoid unnecessary dll calls to addRef()/removeRef(), for
// example during std::vector resize. To do this we need to check whether a
// token is uninitialized without the dll call getEmptyToken(), so we store
// its value here in a constant.
// We run automated test "IToken::getEmptyToken() dll call can be replaced with
// constant, kUninitializedToken" to ensure that this constant never
// changes.
static constexpr TokenC kUninitializedToken{0};
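// For example (illustrative), this is what keeps bulk container operations
// cheap: default-constructed Tokens hold kUninitializedToken, so the resize
// below makes no addRef()/removeRef() dll calls:
//
//   std::vector<carb::flatcache::Token> tokens;
//   tokens.resize(1000); // no per-element refcounting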
// C-ABI interface to pxr::TfToken
struct IToken
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IToken", 0, 1);
TokenC (*getHandle)(const char* name);
const char* (*getText)(TokenC handle);
void (*addRef)(TokenC handle);
void (*removeRef)(TokenC handle);
TokenC (*getEmptyToken)();
uint64_t (*size)(TokenC handle);
};
// C++ wrapper for IToken
class Token
{
static carb::flatcache::IToken& sIToken();
public:
// DEPRECATED: keeping for binary compatibility
// Will be removed in October 2021 - @TODO set FLATCACHE_ITOKEN_INIT to empty macro when removed!
// Still safe to use if initialized in a given dll
static const carb::flatcache::IToken* iToken;
Token() : mHandle(kUninitializedToken)
{
}
Token(const char* string)
{
mHandle = sIToken().getHandle(string);
}
// Needs to be noexcept for std::vector::resize() to move instead of copy
~Token() noexcept
{
#ifndef __CUDACC__
if (mHandle != kUninitializedToken)
{
if (!carb::isFrameworkValid())
{
return;
}
            // IToken can be nullptr during process exit
if (auto iToken = carb::getCachedInterface<carb::flatcache::IToken>())
{
iToken->removeRef(mHandle);
}
}
#endif // __CUDACC__
}
// Copy constructor
Token(const Token& other) : mHandle(other.mHandle)
{
if (mHandle != kUninitializedToken)
{
sIToken().addRef(mHandle);
}
}
// Copy construct from integer
Token(TokenC token) : mHandle(token)
{
if (mHandle != kUninitializedToken)
{
sIToken().addRef(mHandle);
}
}
// Move constructor
// Needs to be noexcept for std::vector::resize() to move instead of copy
Token(Token&& other) noexcept
{
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
// Make source invalid
other.mHandle = kUninitializedToken;
}
// Copy assignment
Token& operator=(const Token& other)
{
if (this != &other)
{
if (mHandle != kUninitializedToken)
{
sIToken().removeRef(mHandle);
}
mHandle = other.mHandle;
if (other.mHandle != kUninitializedToken)
{
sIToken().addRef(mHandle);
}
}
return *this;
}
// Move assignment
Token& operator=(Token&& other) noexcept
{
if (&other == this)
return *this;
// We are about to overwrite the dest handle, so decrease its refcount
if (mHandle != kUninitializedToken)
{
sIToken().removeRef(mHandle);
}
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
other.mHandle = kUninitializedToken;
return *this;
}
const char* getText() const
{
return sIToken().getText(mHandle);
}
uint64_t size() const
{
return sIToken().size(mHandle);
}
std::string getString() const
{
return std::string(sIToken().getText(mHandle), sIToken().size(mHandle));
}
// Note that in the name comparisons below TokenC masks off USD's lifetime bit.
// In other words, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
constexpr bool operator<(const Token& other) const
{
return mHandle < other.mHandle;
}
constexpr bool operator!=(const Token& other) const
{
return mHandle != other.mHandle;
}
constexpr bool operator==(const Token& other) const
{
return mHandle == other.mHandle;
}
constexpr operator TokenC() const
{
return mHandle;
}
private:
TokenC mHandle;
};
static_assert(std::is_standard_layout<Token>::value, "Token must be standard layout as it is used in C-ABI interfaces");
#ifndef __CUDACC__
inline carb::flatcache::IToken& Token::sIToken()
{
// Acquire carbonite interface on first use
carb::flatcache::IToken* iToken = carb::getCachedInterface<carb::flatcache::IToken>();
CARB_ASSERT(iToken);
return *iToken;
}
#endif // __CUDACC__
inline uint64_t swapByteOrder(uint64_t val)
{
#if !CARB_COMPILER_MSC
// Compilers other than MSVC tend to turn the following into a single instruction like bswap
val =
((val & 0xFF00000000000000u) >> 56u) |
((val & 0x00FF000000000000u) >> 40u) |
((val & 0x0000FF0000000000u) >> 24u) |
((val & 0x000000FF00000000u) >> 8u) |
((val & 0x00000000FF000000u) << 8u) |
((val & 0x0000000000FF0000u) << 24u) |
((val & 0x000000000000FF00u) << 40u) |
((val & 0x00000000000000FFu) << 56u);
#else
// MSVC does not currently optimize the above code, so we have to use an intrinsic to get bswap
val = _byteswap_uint64(val);
#endif
return val;
}
inline size_t hash(TokenC token)
{
size_t tokenWithoutMortalityBit = token.token & ~1;
// The following Hash function was chosen to match the one in pxr\base\tf\hash.h
// This is based on Knuth's multiplicative hash for integers. The
// constant is the closest prime to the binary expansion of the inverse
// golden ratio. The best way to produce a hash table bucket index from
// the result is to shift the result right, since the higher order bits
// have the most entropy. But since we can't know the number of buckets
// in a table that's using this, we just reverse the byte order instead,
// to get the highest entropy bits into the low-order bytes.
return swapByteOrder(tokenWithoutMortalityBit * 11400714819323198549ULL);
}
inline size_t hash(Token const& token)
{
return hash(TokenC(token));
}
}
}
namespace std
{
template <>
struct hash<carb::flatcache::Token>
{
std::size_t operator()(const carb::flatcache::Token& key) const
{
return carb::flatcache::hash(key);
}
};
template <>
class hash<carb::flatcache::TokenC>
{
public:
size_t operator()(const carb::flatcache::TokenC& key) const
{
return carb::flatcache::hash(key);
}
};
}
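// Illustrative sketch: the std::hash specializations above let Token be used
// directly as a key in standard containers:
//
//   std::unordered_map<carb::flatcache::Token, int> counts;
//   counts[carb::flatcache::Token("points")] += 1;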
| 8,572 | C | 27.768456 | 122 | 0.640457 |
omniverse-code/kit/fabric/include/carb/flatcache/Defines.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// Improved #define preprocessor directives that support compile-time checking for misspelled or missing
// directives. Basically the same as #define MY_FEATURE 0/1, but with a bit more compile-time safety
// and easier mixing and combining of boolean logic.
//
// Example usage:
// #define MY_FEATURE_A IN_USE
// #define MY_FEATURE_B NOT_IN_USE
// #define MY_FEATURE_C USE_IF( USING( MY_FEATURE_A ) && USING( MY_FEATURE_B ) )
// ...
// void doStuff()
// {
// #if USING( MY_FEATURE_C )
// doStuff_C();
// #else // #if USING( MY_FEATURE_C )
// doStuff_NotC();
// #endif // #if USING( MY_FEATURE_C )
// }
#define IN_USE &&
#define NOT_IN_USE &&!
#define USE_IF(X) &&((X)?1:0)&&
#define USING(X) (1 X 1)
#ifndef NDEBUG
#define DEVELOPMENT_BUILD IN_USE
#else // #ifndef NDEBUG
#define DEVELOPMENT_BUILD NOT_IN_USE
#endif // #ifndef NDEBUG
#ifdef _WIN32
#define WINDOWS_BUILD IN_USE
#define LINUX_BUILD NOT_IN_USE
#elif defined(__linux__) // #ifdef _WIN32
#define WINDOWS_BUILD NOT_IN_USE
#define LINUX_BUILD IN_USE
#else // #elif defined(__linux__) // #ifdef _WIN32
#error "Unsupported platform"
#endif
#define ASSERTS USE_IF( USING( DEVELOPMENT_BUILD ) )
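// For example (illustrative; checkInvariants() is a hypothetical function),
// ASSERTS can gate debug-only checks:
//
//   #if USING( ASSERTS )
//       checkInvariants();
//   #endif // #if USING( ASSERTS )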
| 1,630 | C | 29.203703 | 103 | 0.707975 |
omniverse-code/kit/fabric/include/carb/flatcache/WrapperImpl.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// The purpose of this file is to implement the C++ classes StageInProgress,
// StageAtTime, StageAtTimeInterval and StageHistoryWindow by calling the
// carbonite C-ABI interfaces, IStageInProgress, IStageAtTime,
// IStageAtTimeInterval and IStageHistoryWindow.
//
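// A minimal usage sketch (illustrative; the prim path and attribute name are
// hypothetical):
//
//   carb::flatcache::StageInProgress stage(stageInProgressId);
//   carb::flatcache::Path prim("/World/Cube");
//   carb::flatcache::Token attr("size");
//   if (float* size = stage.getAttributeWr<float>(prim, attr))
//   {
//       *size *= 2.0f;
//   }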
#include "StageWithHistory.h"
#include <carb/InterfaceUtils.h>
#include <carb/logging/Log.h>
#include <type_traits>
#include <cstdint>
namespace carb
{
namespace flatcache
{
// StageInProgress implementation starts here
// RAII constructor
inline StageInProgress::StageInProgress(StageWithHistory& stageWithHistory, size_t simFrameNumber)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
m_stageInProgress = iStageInProgress->create(stageWithHistory.m_usdStageId, simFrameNumber);
m_usdStageId = stageWithHistory.m_usdStageId;
m_createdFromId = false;
}
// Non-RAII constructor
inline StageInProgress::StageInProgress(StageInProgressId stageInProgressId)
{
m_stageInProgress = stageInProgressId;
m_createdFromId = true;
// m_usdStageId is not valid when m_createdFromId==true
}
inline StageInProgress::~StageInProgress()
{
if (!m_createdFromId)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroy(m_usdStageId);
}
}
inline size_t StageInProgress::getFrameNumber()
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getFrameNumber(m_stageInProgress);
}
inline ValidMirrors StageInProgress::getAttributeValidBits(const Path& path, const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getAttributeValidBits(m_stageInProgress, path, attrName);
}
inline RationalTime StageInProgress::getFrameTime()
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getFrameTime(m_stageInProgress);
}
template <typename T>
T* StageInProgress::getAttribute(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttribute(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
const T* StageInProgress::getAttributeRd(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstSpanC ptrAndSize = iStageInProgress->getAttributeRd(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<const T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T* StageInProgress::getAttributeWr(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttributeWr(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T* StageInProgress::getAttributeGpu(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttributeGpu(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
const T* StageInProgress::getAttributeRdGpu(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstSpanC ptrAndSize = iStageInProgress->getAttributeRdGpu(m_stageInProgress, path, attrName);
if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<const T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T* StageInProgress::getAttributeWrGpu(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getAttributeWrGpu(m_stageInProgress, path, attrName);
    if (sizeof(T) == ptrAndSize.elementSize)
{
return reinterpret_cast<T*>(ptrAndSize.ptr);
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
return nullptr;
}
}
template <typename T>
T& StageInProgress::getOrCreateAttributeWr(const Path& path, const Token& attrName, Type type)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC ptrAndSize = iStageInProgress->getOrCreateAttributeWr(m_stageInProgress, path, attrName, TypeC(type));
if (sizeof(T) != ptrAndSize.elementSize)
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), ptrAndSize.elementSize);
}
return *reinterpret_cast<T*>(ptrAndSize.ptr);
}
template <typename T>
gsl::span<T> StageInProgress::getArrayAttribute(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC arrayData = iStageInProgress->getArrayAttributeWr(m_stageInProgress, path, attrName);
if (sizeof(T) != arrayData.elementSize)
{
CARB_LOG_WARN_ONCE(
"Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes",
sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize);
return gsl::span<T>();
}
gsl::span<T> retval(reinterpret_cast<T*>(arrayData.ptr), arrayData.elementCount);
return retval;
}
template <typename T>
gsl::span<const T> StageInProgress::getArrayAttributeRd(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstSpanC arrayData = iStageInProgress->getArrayAttributeRd(m_stageInProgress, path, attrName);
if (sizeof(T) != arrayData.elementSize)
{
CARB_LOG_WARN_ONCE(
"Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes",
sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize);
return gsl::span<const T>();
}
gsl::span<const T> retval(reinterpret_cast<const T*>(arrayData.ptr), arrayData.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getArrayAttributeWr(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC arrayData = iStageInProgress->getArrayAttributeWr(m_stageInProgress, path, attrName);
if (sizeof(T) != arrayData.elementSize)
{
CARB_LOG_WARN_ONCE(
"Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes",
sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize);
return gsl::span<T>();
}
gsl::span<T> retval(reinterpret_cast<T*>(arrayData.ptr), arrayData.elementCount);
return retval;
}
inline size_t StageInProgress::getArrayAttributeSize(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getArrayAttributeSize(m_stageInProgress, path, attrName);
}
inline void StageInProgress::setArrayAttributeSize(const Path& path, const Token& attrName, size_t elemCount)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->setArrayAttributeSize(m_stageInProgress, path, attrName, elemCount);
}
template <typename T>
inline gsl::span<T> StageInProgress::setArrayAttributeSizeAndGet(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
size_t indexInBucket,
const Token& attrName,
size_t newElemCount)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
SpanC newArrayC = iStageInProgress->setArrayAttributeSizeAndGet(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, indexInBucket, attrName, newElemCount);
T* typedElementsPtr = reinterpret_cast<T*>(newArrayC.ptr);
return { typedElementsPtr, newArrayC.elementCount };
}
inline void StageInProgress::createPrim(const Path& path)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->createPrim(m_stageInProgress, path);
}
inline void StageInProgress::destroyPrim(const Path& path)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyPrim(m_stageInProgress, path);
}
inline void StageInProgress::createAttribute(const Path& path, const Token& attrName, Type type)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->createAttribute(m_stageInProgress, path, attrName, TypeC(type));
}
template <int n>
inline void StageInProgress::createAttributes(const Path& path, std::array<AttrNameAndType, n> attributes)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
std::array<TokenC, n> names;
std::array<TypeC, n> types;
for (int c = 0; c < n; ++c)
{
names[c] = attributes[c].name;
types[c] = TypeC(attributes[c].type);
}
iStageInProgress->createAttributes(m_stageInProgress, path, names.data(), types.data(), n);
}
inline void StageInProgress::destroyAttribute(const Path& path, const Token& attrName, Type)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyAttribute2(m_stageInProgress, path, attrName);
}
inline void StageInProgress::destroyAttribute(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyAttribute2(m_stageInProgress, path, attrName);
}
template <int n>
inline void StageInProgress::destroyAttributes(const Path& path, const std::array<Token, n>& attributes)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
std::array<TokenC, n> names;
for (int c = 0; c < n; ++c)
{
names[c] = TokenC(attributes[c]);
}
iStageInProgress->destroyAttributes(m_stageInProgress, path, names.data(), n);
}
inline void StageInProgress::destroyAttributes(const Path& path, const std::vector<Token>& attributes)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
const size_t n = attributes.size();
std::vector<TokenC> names(n);
for (size_t c = 0; c < n; ++c)
{
names[c] = TokenC(attributes[c]);
}
iStageInProgress->destroyAttributes(m_stageInProgress, path, names.data(), (uint32_t)n);
}
inline PrimBucketList StageInProgress::findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
PrimBucketListId primBucketListId = iStageInProgress->findPrims(m_stageInProgress, all, any, none);
return { primBucketListId };
}
inline void StageInProgress::attributeEnableChangeTracking(const Token& attrName, ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->attributeEnable(m_stageInProgress, attrName, listenerId);
}
inline void StageInProgress::enablePrimCreateTracking(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->enablePrimCreateTracking(m_stageInProgress, listenerId);
}
inline void StageInProgress::attributeDisableChangeTracking(const Token& attrName, ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->attributeDisable(m_stageInProgress, attrName, listenerId);
}
inline void StageInProgress::pauseChangeTracking(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->pause(m_stageInProgress, listenerId);
}
inline void StageInProgress::resumeChangeTracking(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->resume(m_stageInProgress, listenerId);
}
inline bool StageInProgress::isChangeTrackingPaused(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
return iChangeTrackerConfig->isChangeTrackingPaused(m_stageInProgress, listenerId);
}
inline bool StageInProgress::isListenerAttached(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
return iChangeTrackerConfig->isListenerAttached(m_stageInProgress, listenerId);
}
inline void StageInProgress::detachListener(ListenerId listenerId)
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
iChangeTrackerConfig->detachListener(m_stageInProgress, listenerId);
}
inline size_t StageInProgress::getListenerCount()
{
auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
return iChangeTrackerConfig->getListenerCount(m_stageInProgress);
}
inline ChangedPrimBucketList StageInProgress::getChanges(ListenerId listenerId)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
PrimBucketListId changeListId = iStageInProgress->getChanges(m_stageInProgress, listenerId);
return ChangedPrimBucketList(changeListId);
}
inline void StageInProgress::popChanges(ListenerId listenerId)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->popChanges(m_stageInProgress, listenerId);
}
template <typename T>
gsl::span<T> StageInProgress::getAttributeArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArray(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<const T> StageInProgress::getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
ConstSpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayRd(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
gsl::span<const T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayWr(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayGpu(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<const T> StageInProgress::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
ConstSpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayRdGpu(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
gsl::span<const T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayWrGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getAttributeArrayWrGpu(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
gsl::span<T> StageInProgress::getOrCreateAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName,
Type type)
{
SpanC array;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getOrCreateAttributeArrayWr(
&array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName, TypeC(type));
T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
gsl::span<T> retval(typedElementsPtr, array.elementCount);
return retval;
}
template <typename T>
std::vector<gsl::span<T>> StageInProgress::getArrayAttributeArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizes(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
size_t primCount = pointersAndSizes.elementCount;
std::vector<gsl::span<T>> arrays(primCount);
for (size_t i = 0; i != primCount; i++)
{
T* typedElementsPtr = reinterpret_cast<T*>(pointersAndSizes.arrayPtrs[i]);
arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
}
return arrays;
}
template <typename T>
std::vector<gsl::span<const T>> StageInProgress::getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ConstArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizesRd(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
size_t primCount = pointersAndSizes.elementCount;
std::vector<gsl::span<const T>> arrays(primCount);
for (size_t i = 0; i != primCount; i++)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(pointersAndSizes.arrayPtrs[i]);
arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
}
return arrays;
}
template <typename T>
std::vector<gsl::span<T>> StageInProgress::getArrayAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
ArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizesWr(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
size_t primCount = pointersAndSizes.elementCount;
std::vector<gsl::span<T>> arrays(primCount);
for (size_t i = 0; i != primCount; i++)
{
T* typedElementsPtr = reinterpret_cast<T*>(pointersAndSizes.arrayPtrs[i]);
arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
}
return arrays;
}
inline gsl::span<const Path> StageInProgress::getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
ConstPathCSpan arrayC;
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->getPathArray(&arrayC, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
const Path* array = reinterpret_cast<const Path*>(arrayC.ptr);
gsl::span<const Path> retval(array, arrayC.elementCount);
return retval;
}
inline void StageInProgress::printBucketNames() const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->printBucketNames(m_stageInProgress);
}
inline void StageInProgress::logAttributeWriteForNotice(const Path& path, const Token& attrName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->logAttributeWriteForNotice(m_stageInProgress, path, attrName);
}
inline flatcache::set<AttrNameAndType> StageInProgress::getAttributeNamesAndTypes(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
size_t attrCount = iStageInProgress->getBucketAttributeCount(
m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
flatcache::set<AttrNameAndType> namesAndTypes;
namesAndTypes.v.resize(attrCount);
// getBucketAttributeNamesAndTypes is guaranteed to return an ordered vector, so we don't have to sort namesAndTypes
iStageInProgress->getBucketAttributeNamesAndTypes(
namesAndTypes.data(), attrCount, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
return namesAndTypes;
}
// Connection API
inline void StageInProgress::createConnection(const Path& path, const Token& connectionName, const Connection& connection)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->createConnection(m_stageInProgress, path, connectionName, connection);
}
inline void StageInProgress::createConnections(const Path& path, const gsl::span<Token>& connectionNames, const gsl::span<Connection>& connections )
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
if(connectionNames.size() != connections.size())
return;
const TokenC* namesC = reinterpret_cast<const TokenC*>(connectionNames.data());
iStageInProgress->createConnections(m_stageInProgress, path, namesC, connections.data(), connectionNames.size());
}
inline void StageInProgress::destroyConnection(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->destroyConnection(m_stageInProgress, path, connectionName);
}
inline void StageInProgress::destroyConnections(const Path& path, const gsl::span<Token>& connectionNames)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
const TokenC* namesC = reinterpret_cast<const TokenC*>(connectionNames.data());
iStageInProgress->destroyConnections(m_stageInProgress, path, namesC, connectionNames.size());
}
inline Connection* StageInProgress::getConnection(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getConnection(m_stageInProgress, path, connectionName);
}
inline const Connection* StageInProgress::getConnectionRd(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getConnectionRd(m_stageInProgress, path, connectionName);
}
inline Connection* StageInProgress::getConnectionWr(const Path& path, const Token& connectionName)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
return iStageInProgress->getConnectionWr(m_stageInProgress, path, connectionName);
}
inline void StageInProgress::copyAttributes(const Path& srcPath, const Path& dstPath)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
iStageInProgress->copyAllAttributes(m_stageInProgress, srcPath, dstPath);
}
inline void StageInProgress::copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
size_t n = srcAttrs.size();
const TokenC* srcAttrsC = reinterpret_cast<const TokenC*>(srcAttrs.data());
iStageInProgress->copySpecifiedAttributes(m_stageInProgress, srcPath, srcAttrsC, dstPath, srcAttrsC, n);
}
inline void StageInProgress::copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath, const gsl::span<Token>& dstAttrs)
{
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
if(srcAttrs.size() != dstAttrs.size())
{
return;
}
size_t n = srcAttrs.size();
const TokenC* srcAttrsC = reinterpret_cast<const TokenC*>(srcAttrs.data());
const TokenC* dstAttrsC = reinterpret_cast<const TokenC*>(dstAttrs.data());
iStageInProgress->copySpecifiedAttributes(m_stageInProgress, srcPath, srcAttrsC, dstPath, dstAttrsC, n);
}
inline bool StageInProgress::primExists(const Path& path)
{
auto iStageReaderWriter = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
bool retval = iStageReaderWriter->getAttributeCount(m_stageInProgress, path) != 0;
return retval;
}
// PrimBucketList implementation starts here
inline carb::flatcache::IPrimBucketList* PrimBucketList::sIPrimBucketList()
{
// Acquire carbonite interface on first use
return carb::getCachedInterface<carb::flatcache::IPrimBucketList>();
}
inline size_t PrimBucketList::bucketCount() const
{
return sIPrimBucketList()->getBucketCount(m_primBucketListId);
}
inline size_t PrimBucketList::size() const
{
return sIPrimBucketList()->getBucketCount(m_primBucketListId);
}
inline void PrimBucketList::print() const
{
return sIPrimBucketList()->print(m_primBucketListId);
}
inline PrimBucketList::~PrimBucketList()
{
sIPrimBucketList()->destroy(m_primBucketListId);
}
inline BucketChanges ChangedPrimBucketList::getChanges(size_t index)
{
return BucketChanges(sIPrimBucketList()->getChanges(m_primBucketListId, index));
}
inline AddedPrimIndices ChangedPrimBucketList::getAddedPrims(size_t index)
{
return AddedPrimIndices(sIPrimBucketList()->getAddedPrims(m_primBucketListId, index));
}
// StageAtTimeInterval implementation starts here
inline carb::flatcache::IStageAtTimeInterval* StageAtTimeInterval::sIStageAtTimeInterval()
{
return carb::getCachedInterface<carb::flatcache::IStageAtTimeInterval>();
}
inline StageAtTimeInterval::StageAtTimeInterval(StageWithHistory& stageWithHistory,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime)
{
m_stageAtTimeInterval =
sIStageAtTimeInterval()->create(stageWithHistory.m_stageWithHistory, beginTime, endTime, includeEndTime);
}
inline StageAtTimeInterval::StageAtTimeInterval(StageWithHistoryId stageWithHistoryId,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime)
{
m_stageAtTimeInterval = sIStageAtTimeInterval()->create(stageWithHistoryId, beginTime, endTime, includeEndTime);
}
inline ValidMirrors StageAtTimeInterval::getAttributeValidBits(const PathC& path, const TokenC& attrName) const
{
return sIStageAtTimeInterval()->getAttributeValidBits(m_stageAtTimeInterval, path, attrName);
}
template <typename T>
std::vector<const T*> StageAtTimeInterval::getAttributeRd(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<const T*> retval(count);
const void** retvalData = reinterpret_cast<const void**>(retval.data());
size_t bytesPerAttr =
sIStageAtTimeInterval()->getAttributeRd(retvalData, count, m_stageAtTimeInterval, path, attrName);
if (sizeof(T) == bytesPerAttr)
{
return retval;
}
else
{
CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
path.getText(), attrName.getText(), bytesPerAttr);
return std::vector<const T*>();
}
}
template <typename T>
std::vector<const T*> StageAtTimeInterval::getAttributeRdGpu(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<const T*> retval(count);
std::vector<ConstSpanC> arrays(count);
sIStageAtTimeInterval()->getAttributeRdGpu(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
for (size_t i = 0; i != count; i++)
{
if (arrays[i].elementSize == sizeof(T))
{
retval[i] = reinterpret_cast<const T*>(arrays[i].ptr);
}
else
{
retval[i] = nullptr;
}
}
return retval;
}
inline std::vector<size_t> StageAtTimeInterval::getArrayAttributeSize(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<size_t> sizes(count);
sIStageAtTimeInterval()->getArrayAttributeSize(sizes.data(), count, m_stageAtTimeInterval, path, attrName);
return sizes;
}
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getArrayAttributeRd(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanWithTypeC> arrays(count);
std::vector<gsl::span<const T>> retval(count);
sIStageAtTimeInterval()->getArrayAttributeWithSizeRd(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
for (size_t i = 0; i != count; i++)
{
if (arrays[i].elementSize != sizeof(T))
{
retval[i] = gsl::span<T>();
continue;
}
const T* ptr = reinterpret_cast<const T*>(arrays[i].ptr);
retval[i] = gsl::span<const T>(ptr, arrays[i].elementCount);
}
return retval;
}
inline std::vector<ConstArrayAsBytes> StageAtTimeInterval::getArrayAttributeRawRd(const Path& path, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanWithTypeC> arrays(count);
std::vector<ConstArrayAsBytes> retval(count);
sIStageAtTimeInterval()->getArrayAttributeWithSizeRd(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
for (size_t i = 0; i != count; i++)
{
const gsl::byte* ptr = reinterpret_cast<const gsl::byte*>(arrays[i].ptr);
retval[i].arrayBytes = gsl::span<const gsl::byte>(ptr, arrays[i].elementCount * arrays[i].elementSize);
retval[i].bytesPerElement = arrays[i].elementSize;
retval[i].elementType = Type(arrays[i].type);
}
return retval;
}
inline std::vector<RationalTime> StageAtTimeInterval::getTimestamps() const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<RationalTime> retval(count);
sIStageAtTimeInterval()->getTimestamps(retval.data(), count, m_stageAtTimeInterval);
return retval;
}
inline size_t StageAtTimeInterval::getTimeSampleCount() const
{
return sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
}
inline PrimBucketList StageAtTimeInterval::findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none)
{
PrimBucketListId primBucketListId = sIStageAtTimeInterval()->findPrims(m_stageAtTimeInterval, all, any, none);
return { primBucketListId };
}
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanC> outC(count);
ConstSpanC* outCData = outC.data();
sIStageAtTimeInterval()->getAttributeArrayRd(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<gsl::span<const T>> retval(count);
size_t i = 0;
for (ConstSpanC array : outC)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
retval[i] = gsl::span<const T>(typedElementsPtr, array.elementCount);
i++;
}
return retval;
}
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanC> outC(count);
ConstSpanC* outCData = outC.data();
sIStageAtTimeInterval()->getAttributeArrayRdGpu(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<gsl::span<const T>> retval(count);
size_t i = 0;
for (ConstSpanC array : outC)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
retval[i] = gsl::span<const T>(typedElementsPtr, array.elementCount);
i++;
}
return retval;
}
template <typename T>
std::vector<std::vector<gsl::span<const T>>> StageAtTimeInterval::getArrayAttributeArrayRd(
const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstArrayPointersAndSizesC> outC(count);
ConstArrayPointersAndSizesC* outCData = outC.data();
sIStageAtTimeInterval()->getArrayAttributeArrayWithSizesRd(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<std::vector<gsl::span<const T>>> retval(count);
size_t i = 0;
for (ConstArrayPointersAndSizesC pointersAndSizes : outC)
{
size_t primCount = pointersAndSizes.elementCount;
retval[i].resize(primCount);
for (size_t j = 0; j != primCount; j++)
{
const T* typedElementsPtr = reinterpret_cast<const T*>(pointersAndSizes.arrayPtrs[j]);
retval[i][j] = { typedElementsPtr, pointersAndSizes.sizes[j] };
}
i++;
}
return retval;
}
inline std::vector<gsl::span<const char>> StageAtTimeInterval::getAttributeArrayRawRd(
const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstSpanC> outC(count);
ConstSpanC* outCData = outC.data();
sIStageAtTimeInterval()->getAttributeArrayRd(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
std::vector<gsl::span<const char>> retval(count);
size_t i = 0;
for (ConstSpanC array : outC)
{
const char* typedElementsPtr = reinterpret_cast<const char*>(array.ptr);
retval[i] = gsl::span<const char>(typedElementsPtr, array.elementCount * array.elementSize);
i++;
}
return retval;
}
inline std::vector<gsl::span<const Path>> StageAtTimeInterval::getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<ConstPathCSpan> outC(count);
ConstPathCSpan* outCData = outC.data();
sIStageAtTimeInterval()->getPathArray(
outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex);
std::vector<gsl::span<const Path>> retval(count);
size_t i = 0;
for (ConstPathCSpan arrayC : outC)
{
const Path* array = reinterpret_cast<const Path*>(arrayC.ptr);
retval[i] = gsl::span<const Path>(array, arrayC.elementCount);
i++;
}
return retval;
}
inline std::vector<const Connection*> StageAtTimeInterval::getConnectionRd(const Path& path, const Token& connectionName)
{
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<const Connection*> retval(count);
const void** retvalData = reinterpret_cast<const void**>(retval.data());
sIStageAtTimeInterval()->getConnectionRd(retvalData, count, m_stageAtTimeInterval, path, connectionName);
return retval;
}
inline void StageAtTimeInterval::printBucketNames() const
{
sIStageAtTimeInterval()->printBucketNames(m_stageAtTimeInterval);
}
inline std::vector<size_t> StageAtTimeInterval::getAttributeCounts(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
std::vector<size_t> counts;
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
counts.resize(count);
sIStageAtTimeInterval()->getAttributeCounts(
m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, count, counts.data());
return counts;
}
inline std::pair<std::vector<std::vector<Token>>, std::vector<std::vector<Type>>> StageAtTimeInterval::getAttributeNamesAndTypes(
const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
std::vector<std::vector<Token>> outNames;
std::vector<std::vector<Type>> outTypes;
size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
std::vector<size_t> outSizes;
outSizes.resize(count);
sIStageAtTimeInterval()->getAttributeCounts(
m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, count, outSizes.data());
outNames.resize(count);
outTypes.resize(count);
// Make array of pointers to inner arrays to allow us to call
// getAttributeNamesAndTypes, which takes a C-style 2D array
// not a std::vector<std::vector>.
// Also set size of inner arrays
std::vector<Token*> outNamesPtrs(count);
std::vector<Type*> outTypesPtrs(count);
for (size_t i = 0; i < count; ++i)
{
outNames[i].resize(outSizes[i]);
outTypes[i].resize(outSizes[i]);
outNamesPtrs[i] = outNames[i].data();
outTypesPtrs[i] = outTypes[i].data();
}
sIStageAtTimeInterval()->getAttributeNamesAndTypes(m_stageAtTimeInterval,
primBucketList.m_primBucketListId,
primBucketListIndex,
count,
outSizes.data(),
outNamesPtrs.data(),
outTypesPtrs.data());
return { outNames, outTypes };
}
inline StageAtTimeInterval::~StageAtTimeInterval()
{
sIStageAtTimeInterval()->destroy(m_stageAtTimeInterval);
}
inline void StageAtTimeInterval::exportUsd(UsdStageId usdStageId) const
{
auto iStageAtTimeInterval = carb::getCachedInterface<carb::flatcache::IStageAtTimeInterval>();
iStageAtTimeInterval->exportUsd(m_stageAtTimeInterval, usdStageId);
}
/**
* @brief Linear interpolation for carb types Double3, Float3, Float4 (color)
* See InterpolationUsd.h for extended type support
*
* @details This is intended to be used internally by StageAtTime read methods in order
* to calculate values that were not written by StageInProgress directly.
*
* Enables the decoupling of the sim and render threads by allowing them access
* to ringbuffer values at various frequencies.
*/
template <typename T>
const T interpolate(const T& a, const T& b, float theta)
{
    // Note: std::lerp could replace this once C++20 is available.
    T result = T(a * (1.0f - theta)) + T(b * theta);
    return result;
}
template <>
inline const carb::Double3 interpolate(const carb::Double3& a, const carb::Double3& b, float theta)
{
if (theta < 0.0 || theta > 1.0)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
carb::Double3 result;
double tmp = 1.0 - theta;
result.x = (a.x * tmp) + (b.x * theta);
result.y = (a.y * tmp) + (b.y * theta);
result.z = (a.z * tmp) + (b.z * theta);
return result;
}
template <>
inline const carb::Float3 interpolate(const carb::Float3& a, const carb::Float3& b, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
carb::Float3 result;
float tmp = 1.0f - theta;
result.x = (a.x * tmp) + (b.x * theta);
result.y = (a.y * tmp) + (b.y * theta);
result.z = (a.z * tmp) + (b.z * theta);
return result;
}
template <>
inline const carb::Float4 interpolate(const carb::Float4& a, const carb::Float4& b, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
carb::Float4 result;
float tmp = 1.0f - theta;
result.x = (a.x * tmp) + (b.x * theta);
result.y = (a.y * tmp) + (b.y * theta);
result.z = (a.z * tmp) + (b.z * theta);
result.w = (a.w * tmp) + (b.w * theta);
return result;
}
template <>
inline const carb::flatcache::Token interpolate(const carb::flatcache::Token& a, const carb::flatcache::Token& b, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
return theta < 0.5f ? a : b;
}
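// Illustrative sketch of the specializations above (values are made up):
//
//   carb::Float3 a{ 0.0f, 0.0f, 0.0f };
//   carb::Float3 b{ 2.0f, 4.0f, 8.0f };
//   carb::Float3 mid = interpolate(a, b, 0.5f); // -> { 1.0f, 2.0f, 4.0f }
//
// Tokens cannot be blended, so the Token specialization snaps to the nearest
// sample instead of interpolating.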
// Auxiliary function used when handling data that is not going to be interpolated (bool, string, int, uint).
// Returns a pair of values from the first and second sampled frames, or the value found and nullopt if data
// is only available in one frame.
template <typename T>
inline optional<std::pair<optional<T>,optional<T>>> StageAtTime::getNonInterpolatableAttributeRd(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getAttributeRd<T>(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
if (rawSamples.size() != sampleTimes.size())
{
return carb::cpp17::nullopt;
}
    // Check that if rawSamples is not empty, rawSamples[0] holds a valid pointer
CARB_ASSERT(rawSamples.empty() || rawSamples[0]);
// Communicate zero samples found
if ( rawSamples.empty() || !rawSamples[0] )
{
return carb::cpp17::nullopt;
}
if (rawSamples.size() == 1)
{
std::pair<carb::cpp17::optional<T>, carb::cpp17::optional<T>> result(*rawSamples[0], carb::cpp17::nullopt);
return result;
}
else if ( (rawSamples.size() == 2) && rawSamples[1] )
{
std::pair<carb::cpp17::optional<T>, carb::cpp17::optional<T>> result(*rawSamples[0], *rawSamples[1]);
return result;
}
return carb::cpp17::nullopt;
}
inline uint64_t StageAtTimeInterval::writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const
{
return sIStageAtTimeInterval()->writeCacheToDisk(m_stageAtTimeInterval, file, workingBuffer, workingBufferSize);
}
inline void StageAtTimeInterval::addRefCount()
{
return sIStageAtTimeInterval()->addRefCount(m_stageAtTimeInterval);
}
inline bool StageAtTimeInterval::removeRefCount()
{
return sIStageAtTimeInterval()->removeRefCount(m_stageAtTimeInterval);
}
inline unsigned int StageAtTimeInterval::getRefCount()
{
return sIStageAtTimeInterval()->getRefCount(m_stageAtTimeInterval);
}
// StageAtTime implementation starts here
// This is defined here rather than in Carbonite plugin to allow use of templates and inlining
inline ValidMirrors StageAtTime::getAttributeValidBits(const PathC& path, const TokenC& attrName) const
{
return m_historyWindow.getAttributeValidBits(path, attrName);
}
// The method reports interpolatable data types, and is specialized as optional<pair<optional<T>,optional<T>>>
// in order to report non-interpolatable data types as encountered in either or both samples
template <typename T>
inline optional<T> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getAttributeRd<T>(path, attrName);
// Communicate zero samples found
if (rawSamples.size() == 0)
{
return carb::cpp17::nullopt;
}
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
CARB_ASSERT(rawSamples[0]);
return *rawSamples[0];
}
else if (rawSamples.size() == 2)
{
CARB_ASSERT(rawSamples[0]);
CARB_ASSERT(rawSamples[1]);
// Calculate linear approximation of f(time)
T a_f = *rawSamples[0];
T b_f = *rawSamples[1];
return interpolate(a_f, b_f, (float)m_theta);
}
return carb::cpp17::nullopt;
}
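// Illustrative usage sketch (not part of the original header; `stageAtTime`,
// `path` and `attrName` are assumed to exist in the caller):
//
//     optional<float> radius = stageAtTime.getAttributeRd<float>(path, attrName);
//     if (radius)
//     {
//         // *radius is the single sample value, or the lerp of two samples at m_theta
//     }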
// The following specializations are explicitly deleted since the specified types cannot be interpolated
// StageAtTime reports the non-interpolatable types read from Flatcache as a pair<optional<T>, optional<T>>
template <>
inline optional<bool> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<int> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<unsigned int> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<unsigned char> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<int64_t> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<uint64_t> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<carb::flatcache::Token> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
// Specialize StageAtTime::getAttributeRd for non-interpolatable types: bool, int, uint
// In these cases the returned type will be a pair of values from the samples found, or nullopt otherwise
template <>
inline optional<std::pair<optional<bool>, optional<bool>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
auto result = getNonInterpolatableAttributeRd<bool>(path, attrName);
return result;
}
template <>
inline optional<std::pair<optional<int>, optional<int>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<int>(path, attrName);
}
template <>
inline optional<std::pair<optional<unsigned int>, optional<unsigned int>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<unsigned int>(path, attrName);
}
template <>
inline optional<std::pair<optional<unsigned char>, optional<unsigned char>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<unsigned char>(path, attrName);
}
template <>
inline optional<std::pair<optional<int64_t>, optional<int64_t>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<int64_t>(path, attrName);
}
template <>
inline optional<std::pair<optional<uint64_t>, optional<uint64_t>>> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<uint64_t>(path, attrName);
}
template <>
inline optional<std::pair<optional<carb::flatcache::Token>, optional<carb::flatcache::Token>>> StageAtTime::getAttributeRd(
const Path& path, const Token& attrName) const
{
return getNonInterpolatableAttributeRd<carb::flatcache::Token>(path, attrName);
}
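// Illustrative usage sketch (not part of the original header): reading a
// non-interpolatable attribute yields up to two raw samples instead of a blend.
//
//     auto samples = stageAtTime.getAttributeRd<std::pair<optional<int>, optional<int>>>(path, attrName);
//     if (samples)
//     {
//         optional<int> atFrameN  = samples->first;  // always set
//         optional<int> atFrameN1 = samples->second; // nullopt if only one sample exists
//     }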
template <typename T>
const T* StageAtTime::getAttributeRdGpu(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getAttributeRdGpu<T>(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
CARB_ASSERT(rawSamples.size() == sampleTimes.size());
// This API doesn't have a way to communicate zero samples found
CARB_ASSERT(rawSamples.size() != 0);
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
CARB_ASSERT(rawSamples[0]);
return rawSamples[0];
}
else if (rawSamples.size() == 2)
{
// For GPU types there is no support for interpolation yet
// Return first sample value instead for now
CARB_LOG_WARN_ONCE("Support for interpolation of array attributes is not supported yet, returning first time sample instead!");
CARB_ASSERT(rawSamples[0]);
return rawSamples[0];
}
return nullptr;
}
inline size_t StageAtTime::getArrayAttributeSize(const Path& path, const Token& attrName) const
{
auto rawSamples = m_historyWindow.getArrayAttributeSize(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
CARB_ASSERT(rawSamples.size() == sampleTimes.size());
// This API doesn't have a way to communicate zero samples found
CARB_ASSERT(rawSamples.size() != 0);
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
return rawSamples[0];
}
else if (rawSamples.size() == 2)
{
// For GPU types there is no support for interpolation yet
// Return first sample value instead for now
return rawSamples[0];
}
return 0;
}
template <typename T>
inline gsl::span<const T> StageAtTime::getArrayAttributeRd(const Path& path, const Token& attrName)
{
auto rawSamples = m_historyWindow.getArrayAttributeRd<T>(path, attrName);
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
CARB_ASSERT(rawSamples.size() == sampleTimes.size());
// This API doesn't have a way to communicate zero samples found
CARB_ASSERT(rawSamples.size() != 0);
// Linear interpolation supports at most two samples
CARB_ASSERT(rawSamples.size() <= 2);
if (rawSamples.size() == 1)
{
return rawSamples[0];
}
else if (rawSamples.size() == 2)
{
// For array types there is no support for interpolation yet
// Return first sample value instead for now
CARB_LOG_WARN_ONCE("Support for interpolation of array attributes is not supported yet, returning first time sample instead!");
return rawSamples[0];
}
return gsl::span<const T>();
}
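// Illustrative usage sketch (not part of the original header):
//
//     gsl::span<const float> weights = stageAtTime.getArrayAttributeRd<float>(path, attrName);
//     for (float w : weights)
//     {
//         // consume each element of the (first) time sample
//     }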
/**
* @brief Auxiliary function used by AttributeArrayResult<T> and AttributeArrayResult<std::vector<T>>
*
* @details Used to assess if a prim is present in both of the sampled frames
*/
inline bool checkPathCorrespondence(const std::vector<gsl::span<const carb::flatcache::Path>>& paths, size_t index, size_t& pos_f0, size_t& pos_f1)
{
if (paths.size() > 1)
{
// in the common case, the prim exists in both frames
if ((index < paths[1].size()) && (paths[0][index] == paths[1][index]))
{
pos_f0 = pos_f1 = index;
return true;
}
auto pathIt = std::find(paths[1].begin(), paths[1].end(), paths[0][index]);
if (pathIt != paths[1].end())
{
pos_f0 = index; // TODO: this isn't needed, can infer it
pos_f1 = std::distance(paths[1].begin(), pathIt);
return true;
}
}
return false;
}
/**
* @brief Returned by StageAtTime.getAttributeArrayRd
*
* @details Holds at most two samples (one from frame n, and one from frame n+1)
* checkPathCorrespondence verifies if the path in frame n exists in frame n+1
* If no corresponding path exists, the value will be returned and not interpolated
*/
template <typename T>
class AttributeArrayResult
{
public:
    size_t size() const
    {
        return m_samples.empty() ? 0 : m_samples[0].size();
    }
bool empty() const
{
return (size() == 0);
}
std::vector<gsl::span<const T>> const* data() const
{
return &m_samples;
}
std::vector<gsl::span<const T>>* data()
{
return &m_samples;
}
    T operator[](const size_t valueIndex) const
    {
        if (m_samples.empty() || valueIndex >= m_samples[0].size())
        {
            CARB_LOG_WARN_ONCE("AttributeArrayResult[] out of bounds");
            return T();
        }
        if (m_samples.size() == 1)
        {
            return m_samples[0][valueIndex];
        }
        else if (m_samples.size() == 2)
        {
            size_t pos0, pos1;
            if (checkPathCorrespondence(m_paths, valueIndex, pos0, pos1))
            {
                T a = (m_samples[0][pos0]);
                T b = (m_samples[1][pos1]);
                T result = interpolate<T>(a, b, m_theta);
                return result;
            }
            return m_samples[0][valueIndex];
        }
        return T();
    }
std::vector<gsl::span<const carb::flatcache::Path>> m_paths;
std::vector<gsl::span<const T>> m_samples;
float m_theta;
};
/**
* @brief Returned by StageAtTime.getArrayAttributeArrayRd
*
* @details Enables access to a vector of readily interpolated attribute values
*/
template <typename T>
class AttributeArrayResult<std::vector<T>>
{
public:
    size_t size() const
    {
        return m_samples.empty() ? 0 : m_samples[0].size();
    }
bool empty() const
{
return (size() == 0);
}
    std::vector<std::vector<gsl::span<const T>>> const* data() const
    {
        return &m_samples;
    }
    std::vector<std::vector<gsl::span<const T>>>* data()
    {
        return &m_samples;
    }
    std::vector<T> operator[](const size_t primIndex)
    {
        std::vector<T> interpolatedAttributeValues;
        if (m_samples.empty() || primIndex >= m_samples[0].size())
        {
            CARB_LOG_WARN_ONCE("AttributeArrayResult[] out of bounds");
            return interpolatedAttributeValues;
        }
        if (m_samples.size() == 1)
        {
            interpolatedAttributeValues.resize(m_samples[0][primIndex].size());
            std::copy(m_samples[0][primIndex].begin(), m_samples[0][primIndex].end(), interpolatedAttributeValues.begin());
            return interpolatedAttributeValues;
        }
else if (m_samples.size() == 2)
{
size_t pos0, pos1;
if (checkPathCorrespondence(m_paths, primIndex, pos0, pos1))
{
auto values_f0 = m_samples[0][primIndex];
auto values_f1 = m_samples[1][primIndex];
interpolatedAttributeValues.reserve(values_f0.size());
// interpolate attrib values for the requested {prim index : attrib val index}
for (size_t valueIndex = 0; valueIndex < values_f0.size(); ++valueIndex)
{
T a = (values_f0[valueIndex]);
T b = (values_f1[valueIndex]);
T result = interpolate<T>(a, b, m_theta);
interpolatedAttributeValues.emplace_back(result);
}
return interpolatedAttributeValues;
}
interpolatedAttributeValues.resize(m_samples[0][primIndex].size());
std::copy(m_samples[0][primIndex].begin(), m_samples[0][primIndex].end(), interpolatedAttributeValues.begin());
return interpolatedAttributeValues;
}
return std::vector<T>();
}
std::vector<gsl::span<const carb::flatcache::Path>> m_paths;
std::vector<std::vector<gsl::span<const T>>> m_samples;
float m_theta;
};
template <typename T>
AttributeArrayResult<T> StageAtTime::getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount > 0)
{
AttributeArrayResult<T> arrAttRes;
arrAttRes.m_samples = m_historyWindow.getAttributeArrayRd<T>(primBucketList, primBucketListIndex, attrName);
arrAttRes.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
arrAttRes.m_theta = (float)m_theta;
return arrAttRes;
}
else
{
CARB_LOG_WARN_ONCE(
"getAttributeArrayRd %s: Data not available at time, possible dropped frame", attrName.getText());
return AttributeArrayResult<T>();
}
}
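// Illustrative usage sketch (not part of the original header; `buckets` is a
// hypothetical PrimBucketList obtained from a prim query):
//
//     auto positions = stageAtTime.getAttributeArrayRd<carb::Float3>(buckets, 0, attrName);
//     for (size_t i = 0; i < positions.size(); ++i)
//     {
//         carb::Float3 p = positions[i]; // interpolated when two samples are present
//     }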
template <typename T>
AttributeArrayResult<T> StageAtTime::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount > 0)
{
AttributeArrayResult<T> arrAttRes;
arrAttRes.m_samples = m_historyWindow.getAttributeArrayRdGpu<T>(primBucketList, primBucketListIndex, attrName);
arrAttRes.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
arrAttRes.m_theta = (float)m_theta;
return arrAttRes;
}
else
{
CARB_LOG_WARN_ONCE(
"getAttributeArrayRdGpu %s: Data not available at time, possible dropped frame", attrName.getText());
return AttributeArrayResult<T>();
}
}
inline std::vector<gsl::span<const char>> StageAtTime::getAttributeArrayRawRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
return m_historyWindow.getAttributeArrayRawRd(primBucketList, primBucketListIndex, attrName);
}
template <typename T>
AttributeArrayResult<std::vector<T>> StageAtTime::getArrayAttributeArrayRd(
const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
AttributeArrayResult<std::vector<T>> result;
if (sampleCount > 0)
{
result.m_samples = m_historyWindow.getArrayAttributeArrayRd<T>(primBucketList, primBucketListIndex, attrName);
result.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
result.m_theta = (float)m_theta;
return result;
}
else
{
        CARB_LOG_WARN_ONCE(
            "getArrayAttributeArrayRd %s: Data not available at time, possible dropped frame", attrName.getText());
return AttributeArrayResult<std::vector<T>>();
}
}
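// Illustrative usage sketch (not part of the original header): per-prim array
// attributes come back as one interpolated std::vector per prim.
//
//     auto arrays = stageAtTime.getArrayAttributeArrayRd<float>(buckets, 0, attrName);
//     for (size_t prim = 0; prim < arrays.size(); ++prim)
//     {
//         std::vector<float> values = arrays[prim];
//     }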
inline gsl::span<const Path> StageAtTime::getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount == 1)
{
return m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
}
else if (sampleCount == 0)
{
CARB_LOG_WARN_ONCE("getPathArray: Data not available at time, possible dropped frame");
return gsl::span<const Path>();
}
else if (sampleCount == 2)
{
// TODO: make this correct when prims are being added and deleted
// To do this we need to make a new array out:
// out[i] = in0[i] , if in0[i] == in1[i]
// = kUninitializedPath, otherwise
return m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
#if 0
gsl::span<const Path> in0 = m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
gsl::span<const Path> in1 = m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[1];
std::vector<Path> multiframePaths;
for (size_t i = 0; i < in0.size(); ++i)
in0[i] == in1[i] ? multiframePaths.emplace_back(in0[i]) : multiframePaths.emplace_back(flatcache::kUninitializedPath);
return multiframePaths;
#endif
}
return gsl::span<const Path>();
}
inline std::vector<const Connection*> StageAtTime::getConnectionRd(const Path& path, const Token& connectionName)
{
return m_historyWindow.getConnectionRd(path, connectionName);
}
inline void StageAtTime::printBucketNames() const
{
m_historyWindow.printBucketNames();
}
inline size_t StageAtTime::getAttributeCount(const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
std::vector<size_t> counts = m_historyWindow.getAttributeCounts(primBucketList, primBucketListIndex);
if (counts.size() == 1)
{
return counts[0];
}
    // Perform a set intersection to get a valid count
if (counts.size() == 2)
{
//
// TODO: The attributes are internally sorted vectors, see flatcache::set.
// Ideally we'd make a C-ABI type that makes it clear that these are sorted,
// wrap with flatcache::set in the C++ wrapper and then use the standard library set intersection.
//
auto namesAndTypes = m_historyWindow.getAttributeNamesAndTypes(primBucketList, primBucketListIndex);
const std::vector<std::vector<Token>>& names = namesAndTypes.first;
const std::vector<std::vector<Type>>& types = namesAndTypes.second;
std::vector<Token> intersection;
// Perform a set intersection but we need to track the types as we intersect
const std::vector<Token>& workingNames = names[0].size() < names[1].size() ? names[0] : names[1];
const std::vector<Type>& workingTypes = names[0].size() < names[1].size() ? types[0] : types[1];
const std::vector<Token>& testingNames = names[0].size() < names[1].size() ? names[1] : names[0];
const std::vector<Type>& testingTypes = names[0].size() < names[1].size() ? types[1] : types[0];
// Since attribute vectors are sorted we can track last spotted locations to be more efficient.
size_t last = 0;
for (size_t i = 0; i < workingNames.size(); ++i)
{
for (size_t j = last; j < testingNames.size(); ++j)
{
if (workingNames[i] == testingNames[j])
{
if (workingTypes[i] == testingTypes[j])
{
intersection.push_back(workingNames[i]);
}
// Store hit location to start next search
last = j;
break;
}
}
}
return intersection.size();
}
return 0;
}
inline std::pair<std::vector<Token>, std::vector<Type>> StageAtTime::getAttributeNamesAndTypes(
const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
std::vector<Token> outNames;
std::vector<Type> outTypes;
std::vector<std::vector<Token>> names;
std::vector<std::vector<Type>> types;
std::tie(names, types) = m_historyWindow.getAttributeNamesAndTypes(primBucketList, primBucketListIndex);
if (names.size() == 1)
{
outNames = std::move(names[0]);
outTypes = std::move(types[0]);
}
if (names.size() == 2)
{
// Assuming that the invariant that names and types of the same slot are the same count holds.
outNames.reserve(std::min(names[0].size(), names[1].size()));
outTypes.reserve(std::min(types[0].size(), types[1].size()));
// Perform a set intersection but we need to track the types as we intersect
std::vector<Token>& workingNames = names[0].size() < names[1].size() ? names[0] : names[1];
std::vector<Type>& workingTypes = names[0].size() < names[1].size() ? types[0] : types[1];
std::vector<Token>& testingNames = names[0].size() < names[1].size() ? names[1] : names[0];
std::vector<Type>& testingTypes = names[0].size() < names[1].size() ? types[1] : types[0];
// Since attribute vectors are sorted we can track last spotted locations to be more efficient.
size_t last = 0;
for (size_t i = 0; i < workingNames.size(); ++i)
{
for (size_t j = last; j < testingNames.size(); ++j)
{
if (workingNames[i] == testingNames[j])
{
if (workingTypes[i] == testingTypes[j])
{
outNames.push_back(workingNames[i]);
outTypes.push_back(workingTypes[i]);
}
// Store hit location to start next search
last = j;
break;
}
}
}
}
return { outNames, outTypes };
}
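// Illustrative usage sketch (not part of the original header): enumerating the
// attributes present in both sampled frames of a bucket.
//
//     std::vector<Token> names;
//     std::vector<Type> types;
//     std::tie(names, types) = stageAtTime.getAttributeNamesAndTypes(buckets, 0);
//     // names[i] / types[i] describe one attribute of the bucket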
inline uint64_t StageAtTime::writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const
{
size_t sampleCount = m_historyWindow.getTimeSampleCount();
if (sampleCount != 1)
{
        CARB_LOG_ERROR_ONCE("Can't call StageAtTime::writeCacheToDisk for interpolated values");
return 0;
}
return m_historyWindow.writeCacheToDisk(file, workingBuffer, workingBufferSize);
}
inline void StageAtTime::addRefCount()
{
m_historyWindow.addRefCount();
}
inline bool StageAtTime::removeRefCount()
{
return m_historyWindow.removeRefCount();
}
inline unsigned int StageAtTime::getRefCount()
{
return m_historyWindow.getRefCount();
}
// StageWithHistory implementation starts here
inline StageWithHistory::StageWithHistory(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda)
{
auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
m_stageWithHistory = iStageWithHistory->create2(usdStageId, historyFrameCount, simPeriod, withCuda);
m_usdStageId = usdStageId;
}
inline StageWithHistory::~StageWithHistory()
{
auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
iStageWithHistory->destroy(m_usdStageId);
}
inline ListenerId StageWithHistory::createListener()
{
    auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
    ListenerId newId = iStageWithHistory->createListener();
return newId;
}
// Templated methods do not get compiled unless they are instantiated.
// The following code is not intended to be executed, it just instantiates each
// templated method once to make sure that they compile.
inline void instantiationTest(StageInProgress& stage,
StageAtTimeInterval& stageAtInterval,
StageAtTime& stageAtTime,
const Path& path,
const Token& attrName)
{
int* x0 = stage.getAttribute<int>(path, attrName);
CARB_UNUSED(x0);
const int* x1 = stage.getAttributeRd<int>(path, attrName);
CARB_UNUSED(x1);
int* x2 = stage.getAttributeWr<int>(path, attrName);
CARB_UNUSED(x2);
gsl::span<int> x3 = stage.getArrayAttribute<int>(path, attrName);
CARB_UNUSED(x3);
gsl::span<const int> x4 = stage.getArrayAttributeRd<int>(path, attrName);
CARB_UNUSED(x4);
gsl::span<int> x5 = stage.getArrayAttributeWr<int>(path, attrName);
CARB_UNUSED(x5);
PrimBucketList pbl = stage.findPrims({}, {}, {});
gsl::span<int> x6 = stage.getAttributeArray<int>(pbl, 0, attrName);
CARB_UNUSED(x6);
std::vector<const int*> x7 = stageAtInterval.getAttributeRd<int>(path, attrName);
CARB_UNUSED(x7);
std::vector<gsl::span<const int>> x8 = stageAtInterval.getAttributeArrayRd<int>(pbl, 0, attrName);
CARB_UNUSED(x8);
optional<float> x9 = stageAtTime.getAttributeRd<float>(path, attrName);
CARB_UNUSED(x9);
    optional<std::pair<optional<int>, optional<int>>> x10 = stageAtTime.getAttributeRd<std::pair<optional<int>, optional<int>>>(path, attrName);
CARB_UNUSED(x10);
carb::flatcache::AttributeArrayResult<int> x11 = stageAtTime.getAttributeArrayRd<int>(pbl, 0, attrName);
CARB_UNUSED(x11);
carb::flatcache::AttributeArrayResult<std::vector<int>> x12 = stageAtTime.getArrayAttributeArrayRd<int>(pbl, 0, attrName);
CARB_UNUSED(x12);
}
} // namespace flatcache
} // namespace carb
| 74,405 | C | 37.176501 | 153 | 0.672603 |
omniverse-code/kit/fabric/include/carb/flatcache/IPath.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Framework.h>
#include <carb/Interface.h>
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/Intrinsics.h>
#include <functional>
// Set to empty macro when IPath::iPath static member is removed
#define FLATCACHE_IPATH_INIT \
const carb::flatcache::IPath* carb::flatcache::Path::iPath = nullptr;
namespace carb
{
namespace flatcache
{
// PathC are integer keys that identify paths to C-ABI interfaces
struct PathC
{
uint64_t path;
constexpr bool operator<(const PathC& other) const
{
return path < other.path;
}
constexpr bool operator==(const PathC& other) const
{
return path == other.path;
}
constexpr bool operator!=(const PathC& other) const
{
return path != other.path;
}
};
static_assert(std::is_standard_layout<PathC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");
// We don't reference count the uninitialized (or empty) path, and we use
// this fact to avoid unnecessary dll calls to addRef()/removeRef(), for
// example during std::vector resize. To do this we need to check whether a
// path is uninitialized without the dll call getEmptyPath(), so we store
// its value here in a constant.
// We run automated test "IPath::getEmptyPath() dll call can be replaced with
// constant, Path::kUninitializedPath" to ensure that this constant never
// changes.
static constexpr PathC kUninitializedPath{0};
// C-ABI interface to pxr::SdfPath
struct IPath
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IPath", 0, 1);
PathC (*getHandle)(const char* name);
const char* (*getText)(PathC handle);
PathC (*getParent)(PathC handle);
PathC (*appendChild)(PathC handle, TokenC childName);
void (*addRef)(PathC handle);
void (*removeRef)(PathC handle);
PathC (*getEmptyPath)();
// Creates a path by appending a given relative path to this path.
PathC (*appendPath)(PathC handle, PathC path);
// Returns the number of path elements in this path.
uint32_t (*getPathElementCount)(PathC handle);
};
// C++ wrapper for IPath
class Path
{
static carb::flatcache::IPath& sIPath();
public:
// DEPRECATED: keeping for binary compatibility
// Will be removed in October 2021 - @TODO set FLATCACHE_IPATH_INIT to empty macro when removed!
// Still safe to use if initialized in a given dll
static const carb::flatcache::IPath* iPath;
Path() : mHandle(kUninitializedPath)
{
}
Path(const char* path)
{
mHandle = sIPath().getHandle(path);
}
// Needs to be noexcept for std::vector::resize() to move instead of copy
~Path() noexcept
{
// We see the compiler construct and destruct many uninitialized
// temporaries, for example when resizing std::vector.
// We don't want to do an IPath dll call for these, so skip if handle
// is uninitialized.
if (mHandle != kUninitializedPath)
{
sIPath().removeRef(mHandle);
}
}
// Copy constructor
Path(const Path& other) : mHandle(other.mHandle)
{
if (mHandle != kUninitializedPath)
{
sIPath().addRef(mHandle);
}
}
// Copy construct from integer
Path(PathC handle) : mHandle(handle)
{
if (mHandle != kUninitializedPath)
{
sIPath().addRef(mHandle);
}
}
// Move constructor
// Needs to be noexcept for std::vector::resize() to move instead of copy
Path(Path&& other) noexcept
{
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
// Make source invalid
other.mHandle = kUninitializedPath;
}
// Copy assignment
Path& operator=(const Path& other)
{
if (this != &other)
{
if (mHandle != kUninitializedPath)
{
sIPath().removeRef(mHandle);
}
if (other.mHandle != kUninitializedPath)
{
sIPath().addRef(other.mHandle);
}
}
mHandle = other.mHandle;
return *this;
}
// Move assignment
Path& operator=(Path&& other) noexcept
{
if (&other == this)
return *this;
// We are about to overwrite the dest handle, so decrease its refcount
if (mHandle != kUninitializedPath)
{
sIPath().removeRef(mHandle);
}
// We are moving the src handle so don't need to change its refcount
mHandle = other.mHandle;
other.mHandle = kUninitializedPath;
return *this;
}
const char* getText() const
{
return sIPath().getText(mHandle);
}
constexpr bool operator<(const Path& other) const
{
return mHandle < other.mHandle;
}
constexpr bool operator!=(const Path& other) const
{
return mHandle != other.mHandle;
}
constexpr bool operator==(const Path& other) const
{
return mHandle == other.mHandle;
}
constexpr operator PathC() const
{
return mHandle;
}
private:
PathC mHandle;
};
static_assert(std::is_standard_layout<Path>::value, "Path must be standard layout as it is used in C-ABI interfaces");
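// Illustrative usage sketch (not part of the original header; the path string is
// hypothetical):
//
//     carb::flatcache::Path p("/World/Cube");
//     const char* text = p.getText(); // "/World/Cube"
//     carb::flatcache::Path copy = p; // copy bumps the refcount
//     bool same = (copy == p);        // true; comparison is on the integer handle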
#ifndef __CUDACC__
inline carb::flatcache::IPath& Path::sIPath()
{
// Acquire carbonite interface on first use
carb::flatcache::IPath* iPath = carb::getCachedInterface<carb::flatcache::IPath>();
CARB_ASSERT(iPath);
return *iPath;
}
#endif // __CUDACC__
}
}
namespace std
{
template <>
class hash<carb::flatcache::PathC>
{
public:
inline size_t operator()(const carb::flatcache::PathC& key) const
{
// lower 8 bits have no entropy, so just remove the useless bits
return key.path >> 8;
}
};
template <>
class hash<carb::flatcache::Path>
{
public:
inline size_t operator()(const carb::flatcache::Path& key) const
{
return std::hash<carb::flatcache::PathC>()(carb::flatcache::PathC(key));
}
};
}
| 6,493 | C | 26.171548 | 121 | 0.639458 |
omniverse-code/kit/fabric/include/carb/flatcache/FlatCacheUSD.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/IPath.h>
#include <carb/logging/Log.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/path.h>
namespace carb
{
namespace flatcache
{
// asInt() is the same as SdfPath::_AsInt()
// Flatcache relies on asInt(a)==asInt(b) <=> a is same path as b,
// which is how SdfPath::operator== is currently defined.
// If USD changes sizeof(pxr::SdfPath), we will need to change PathC to make it
// the same size.
inline PathC asInt(const pxr::SdfPath& path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
PathC ret;
std::memcpy(&ret, &path, sizeof(pxr::SdfPath));
return ret;
}
inline const PathC* asInt(const pxr::SdfPath* path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
return reinterpret_cast<const PathC*>(path);
}
inline TokenC asInt(const pxr::TfToken& token)
{
static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
TokenC ret;
std::memcpy(&ret, &token, sizeof(pxr::TfToken));
return ret;
}
inline const TokenC* asInt(const pxr::TfToken* token)
{
static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
return reinterpret_cast<const TokenC*>(token);
}
// Return reference to ensure that reference count doesn't change
inline const pxr::TfToken& intToToken(const TokenC& token)
{
static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
return reinterpret_cast<const pxr::TfToken&>(token);
}
inline const pxr::SdfPath& intToPath(const PathC& path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
return reinterpret_cast<const pxr::SdfPath&>(path);
}
inline const pxr::SdfPath* intToPath(const Path* path)
{
static_assert(sizeof(pxr::SdfPath) == sizeof(Path), "Change Path to make the same size as pxr::SdfPath");
return reinterpret_cast<const pxr::SdfPath*>(path);
}
}
}
| 2,585 | C | 31.325 | 113 | 0.716828 |
omniverse-code/kit/fabric/include/carb/flatcache/PrimChanges.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IFlatcache.h>
#include <carb/flatcache/IPath.h>
#include <gsl/span>
#include <cstddef>
namespace carb
{
namespace flatcache
{
struct AttrAndChangedIndices
{
AttrNameAndType attr;
// For which prims did this attribute change?
bool allIndicesChanged;
gsl::span<const size_t> changedIndices;
};
struct BucketChanges
{
// For each attribute, which prims changed?
std::vector<AttrAndChangedIndices> attrChangedIndices;
gsl::span<const Path> pathArray;
BucketChanges() = default;
BucketChanges(BucketChangesC in) :
pathArray({ in.pathArray.ptr,in.pathArray.elementCount })
{
size_t count = in.changedIndices.elementCount;
attrChangedIndices.resize(count);
for (size_t i = 0; i != count; i++)
{
const ConstChangedIndicesC& inAttrChanges = in.changedIndices.ptr[i];
attrChangedIndices[i].attr = in.changedAttributes.ptr[i];
attrChangedIndices[i].allIndicesChanged = inAttrChanges.allIndicesChanged;
attrChangedIndices[i].changedIndices =
gsl::span<const size_t>(inAttrChanges.changedIndices.ptr, inAttrChanges.changedIndices.elementCount);
}
}
};
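// Illustrative usage sketch (not part of the original header; `changesC` is a
// hypothetical BucketChangesC obtained from the C-ABI change-tracking interface):
//
//     BucketChanges changes(changesC);
//     for (const AttrAndChangedIndices& a : changes.attrChangedIndices)
//     {
//         if (a.allIndicesChanged)
//         {
//             // every prim in changes.pathArray changed for attribute a.attr
//         }
//         else
//         {
//             for (size_t i : a.changedIndices)
//             {
//                 // changes.pathArray[i] changed for attribute a.attr
//             }
//         }
//     }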
class AddedPrimIndices
{
// Which prims were added?
gsl::span<const size_t> addedIndices;
public:
AddedPrimIndices(AddedPrimIndicesC in)
{
addedIndices = gsl::span<const size_t>(in.addedIndices.ptr, in.addedIndices.elementCount);
}
size_t size() const
{
return addedIndices.size();
}
    // This iterator visits the indices of added prims: first the slots of deleted
    // elements that were reused by new elements, then the contiguous range of
    // elements added at the end of the bucket
struct iterator
{
using iterator_category = std::input_iterator_tag;
using difference_type = size_t;
using value_type = size_t;
using reference = size_t;
iterator(
gsl::span<const size_t>::iterator _addedIndicesIterator,
gsl::span<const size_t>::iterator _addedIndicesEnd) :
addedIndicesIterator(_addedIndicesIterator),
addedIndicesEnd(_addedIndicesEnd)
{}
reference operator*() const
{
return *addedIndicesIterator;
}
iterator& operator++()
{
addedIndicesIterator++;
return *this;
}
bool operator==(iterator other) const
{
return addedIndicesIterator == other.addedIndicesIterator;
}
bool operator!=(iterator other) const { return !(*this == other); }
difference_type operator-(iterator other)
{
return addedIndicesIterator - other.addedIndicesIterator;
}
private:
gsl::span<const size_t>::iterator addedIndicesIterator;
gsl::span<const size_t>::iterator addedIndicesEnd;
};
iterator begin()
{
return iterator(addedIndices.begin(), addedIndices.end());
}
iterator end()
{
return iterator(addedIndices.end(), addedIndices.end());
}
};
}
}
| 3,624 | C | 26.462121 | 117 | 0.656457 |
omniverse-code/kit/fabric/include/carb/flatcache/Intrinsics.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <cstdint>
#include <cstdlib>
#include <cstddef>
#include <carb/flatcache/Defines.h>
#if USING( WINDOWS_BUILD )
#include <intrin.h>
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
// no linux-specific includes at this time
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
namespace carb
{
namespace flatcache
{
inline uint32_t clz32( const uint32_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanReverse( &z, x ) ? 31 - z : 32;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_clz( x ) : 32;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint32_t clz64( const uint64_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanReverse64( &z, x ) ? 63 - z : 64;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_clzll( x ) : 64;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint32_t ctz32( const uint32_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanForward( &z, x ) ? z : 32;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_ctz( x ) : 32;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint32_t ctz64( const uint64_t x )
{
#if USING( WINDOWS_BUILD )
DWORD z;
return _BitScanForward64( &z, x ) ? z : 64;
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return x ? __builtin_ctzll( x ) : 64;
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
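// Expected values (illustrative, not part of the original header):
//
//     clz32(1)          == 31    ctz32(8)          == 3
//     clz32(0)          == 32    ctz32(0)          == 32
//     clz64(1ull << 63) == 0     ctz64(1ull << 63) == 63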
inline uint64_t bswap64( const uint64_t x )
{
#if USING( WINDOWS_BUILD )
return _byteswap_uint64( x );
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return __builtin_bswap64 ( x );
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint64_t rotr64( const uint64_t value, const int shift )
{
#if USING( WINDOWS_BUILD )
return _rotr64( value, shift );
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return (value >> shift) | (value << (64 - shift));
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
inline uint64_t rotl64( const uint64_t value, const int shift )
{
#if USING( WINDOWS_BUILD )
return _rotl64( value, shift );
#elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD )
return (value << shift) | (value >> (64 - shift));
#else // #if USING( WINDOWS_BUILD )
#error "Unsupported platform"
#endif // #if USING( WINDOWS_BUILD )
}
} // namespace flatcache
} // namespace carb | 3,199 | C | 27.318584 | 77 | 0.680213 |
omniverse-code/kit/fabric/include/carb/flatcache/FlatCache.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Interface.h>
#include <carb/flatcache/PathToAttributesMap.h>
namespace carb
{
namespace flatcache
{
// Callers of createCache() and getCache() can store anything they want in
// UserId. For example, OmniGraph uses it to store the OmniGraph pointer.
struct UserId
{
uint64_t id;
bool operator<(UserId b) const
{
return id < b.id;
}
bool operator==(UserId b) const
{
return id == b.id;
}
bool operator!=(UserId b) const
{
return id != b.id;
}
};
constexpr UserId kDefaultUserId = { 0 };
constexpr UserId kInvalidUserId = { ~uint64_t(0) };
// Flatcache has the option to save a finite number of frames of history,
// organized as a ringbuffer. This is typically used to buffer data between
// simulation rendering. The simplest case, double buffering, allows simulation
// and rendering to run in parallel, each running for the full frame.
// Longer buffers can be used to feed one or more renderers running at
// different rates to simulation.
// To enable this feature, pass CacheType::eWithHistory to createCache(),
// otherwise pass CacheType::eWithoutHistory.
// Multiple caches can be created for each UsdStageId, but at most one can have
// history.
enum class CacheType
{
eWithHistory,
eWithoutHistory,
eWithoutHistoryAndWithCuda,
eWithHistoryAndCuda
};
struct FlatCache
{
CARB_PLUGIN_INTERFACE("carb::flatcache::FlatCache", 0, 4);
// Abstractly, a flatcache maps USD paths to USD attributes, just like a
// UsdStage does.
// Concretely we represent a flatcache by objects of type PathToAttributesMap.
// This method creates a PathToAttributesMap for a given stage, but doesn't
// populate it with values. This allows the cache to be filled lazily as
// values are needed.
// Instead, it traverses the given Usd stage making an index of paths to
// attributes.
// The cache uses the index to organize data into contiguous arrays,
// and also allows you to find prims by type and/or attribute without
// traversing the stage.
// This method also specifies the stage to be used by calls to usdToCache()
// and cacheToUsd().
PathToAttributesMap&(CARB_ABI* createCache)(UsdStageId usdStageId, UserId userId, CacheType cacheType);
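    // Illustrative usage sketch (not part of the original header; `stageId` is a
    // hypothetical UsdStageId for an already-open stage):
    //
    //     auto* fc = carb::getCachedInterface<carb::flatcache::FlatCache>();
    //     PathToAttributesMap& cache = fc->createCache(stageId, kDefaultUserId, CacheType::eWithoutHistory);
    //     fc->usdToCache(cache);  // optional: prefetch the whole stage up front
    //     ...
    //     fc->destroyCache(stageId, kDefaultUserId);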
void(CARB_ABI* addPrimToCache)(PathToAttributesMap& cache, const pxr::UsdPrim& prim, const std::set<TokenC>& filter);
// Destroy the cache associated with the given stage.
void(CARB_ABI* destroyCache)(UsdStageId usdStageId, UserId userId);
// Prefetch the whole USD stage to the cache
// Typically you only call this at stage load time, because the USD notify
// handler updates the cache if the stage changes.
void(CARB_ABI* usdToCache)(PathToAttributesMap& cache);
// Write back all dirty cached data to the USD stage.
// If your renderer doesn't use the cache then you need to do this
// before rendering.
void(CARB_ABI* cacheToUsd)(PathToAttributesMap& cache);
// Write back only one bucket to usd
void(CARB_ABI* cacheBucketToUsd)(PathToAttributesMap& cache, BucketId bucketId, bool skipMeshPoints);
TypeC(CARB_ABI* usdTypeToTypeC)(pxr::SdfValueTypeName usdType);
PathToAttributesMap*(CARB_ABI* getCache)(UsdStageId usdStageId, UserId userId);
pxr::SdfValueTypeName(CARB_ABI* typeCtoUsdType)(TypeC typeC);
size_t(CARB_ABI* getUsdTypeCount)();
void(CARB_ABI* getAllUsdTypes)(TypeC* outArray, size_t outArraySize);
    /** @brief Import a prim into the cache without overwriting existing data
*/
void(CARB_ABI* addPrimToCacheNoOverwrite)(PathToAttributesMap& cache,
const pxr::UsdPrim& prim,
const std::set<TokenC>& filter);
void(CARB_ABI* initStaticVariables)();
void(CARB_ABI* exportUsd)(PathToAttributesMap& cache, pxr::UsdStageRefPtr usdStage, const double* timeCode, const double* prevTimeCode);
/** @brief Attempt to serialize the cache into the specified buffer.
*
* @cache[in] The cache to serialize
     * @dest[in/out] Pointer to buffer to be written to, will start writing to head
     * of pointer. dest will be left pointing to the point after the last write
     * @destSize Size of buffer that was allocated for the data (in bytes)
     * @pathStringCache - cache of path strings; string lookups are slow, so reuse it across calls
     *
     * @return Number of bytes written; success is determined by (return <= @destSize)
*
*
     * @invariant It is safe to write to any memory within [dest, dest+destSize] for the
     * duration of the function call.
*
* @note If the cache will not fit into the size of memory allocated in
* @dest then it will stop writing, but continue to run the serialize
* algorithm to calculate the actual amount of data that needs to be
* written
*
     * @Todo : make cache const - not currently possible because serializeMirroredArray is
     * not const; that in turn is because getArraySpanC is used, which also doesn't
     * have a const version, so that needs to be addressed first. This is because
     * in the call stack we end up with a copy from GPU -> CPU, which would need to
     * be avoided
*/
uint64_t(CARB_ABI* serializeCache)(PathToAttributesMap& cache, uint8_t* dest, size_t destSize, SerializationCache& pathStringCache);
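    // Illustrative usage sketch (not part of the original header): if the first
    // attempt does not fit, the return value reports the size actually needed.
    //
    //     SerializationCache strings;
    //     std::vector<uint8_t> buffer(1024 * 1024);
    //     uint64_t written = fc->serializeCache(cache, buffer.data(), buffer.size(), strings);
    //     if (written > buffer.size())
    //     {
    //         buffer.resize(written);
    //         written = fc->serializeCache(cache, buffer.data(), buffer.size(), strings);
    //     }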
/** @brief Given a buffer that has the serialized version of a cache written
* using the serialize function, this function will override all the data
* in the cache with the data from the buffer
*
* @cache[in/out] Reference to the cache to be populated
     * @pathCache[in/out] Looking up an SdfPath via string can be expensive, so it
* is worthwhile to cache this data across many repeated
* calls.
* @input[in] Pointer to buffer of data containing serialized cache
* @inputSize[in] Size of data in the buffer
* @skipStageConfirmation[in] Whether we should skip making sure the destination stage is open.
*
* @return True if buffer was successfully de-serialized
*
     * @note : this currently has to clear the cache before it is populated, which can be
     * an expensive operation
     *
     * @TODO: should we care that it came from the same version of the USD file?
*/
bool(CARB_ABI* deserializeCache)(
PathToAttributesMap& destStage,
DeserializationCache& pathCache,
const uint8_t* input,
const size_t inputSize,
bool skipStageConfirmation);
/** @brief Write a cache file to disk at a specified location
*
* @note many parameters to this function are optional
     * @cache[in] The cache to be written to disk
     * @file[in] The location the file is to be written to
     * @workingBuffer[in] [Optional] In order to avoid costly reallocations
     * the code will attempt to re-use the memory at the buffer
     * location if it is large enough. If the buffer isn't large
     * enough, the cost of allocation and re-traversal may be paid
     * @workingBufferSize[in] [Optional] If workingBuffer is non null, then this describes the length
     * of the buffer
     * @return The amount of data needed to serialize the cache; a return value of 0 indicates an error
*
*/
uint64_t(CARB_ABI* writeCacheToDisk)(PathToAttributesMap& cache,
const char* file,
uint8_t* workingBuffer,
uint64_t workingBufferSize);
/** @brief Read a cache file from the specified location
*
     * @file[in] The location of the file to be read
     * @cache[in/out] The cache to be populated
     * @pathCache[in/out] Looking up an SdfPath via string can be expensive, so it
     * is worthwhile to cache this data across many repeated
     * calls.
     * @buffer[in/out] Buffer to use to read the cache file into, passed in to
     * allow reuse rather than allocating per call. Will be resized if not large enough.
* @return Whether the read was successful
*
*/
bool(CARB_ABI* readCacheFromDisk)(PathToAttributesMap& cache,
const char* fileName,
DeserializationCache& pathCache,
std::vector<uint8_t>& buffer);
/** @brief Enable/Disable change notifications on USD changes.
*
* @enable[in] True/False enable notifications
*
*/
void(CARB_ABI* setEnableChangeNotifies)(bool enable);
/** @brief Return whether change notifications on USD changes is enabled.
*
* @return True if change notifications on USD changes is enabled, else False.
*
*/
bool(CARB_ABI* getEnableChangeNotifies)();
/** @brief make buckets for all prims on a USD stage, but only if this
* hasn't been done before.
*
* This is used to lazily create an index of all prims on a stage, without
* the time or memory cost of fetching all the attribute values. The user
* can then use findPrims to, for example, find all the prims of a
* particular type.
*
* If a SimStageWithHistory hasn't been created for this stage then a
* warning will be printed and no population will be done.
*
* @cache[in] The PathToAttributesMap to populate
*/
void(CARB_ABI* minimalPopulateIfNecessary)(PathToAttributesMap& cache);
};
}
}
| 9,956 | C | 40.144628 | 140 | 0.677581 |
omniverse-code/kit/fabric/include/carb/flatcache/Allocator.h | #pragma once
#include <cmath>
#include <carb/logging/Log.h>
#include <carb/Defines.h>
#include <carb/flatcache/Defines.h>
#include <carb/flatcache/Intrinsics.h>
#define ALLOCATOR_HEADER USE_IF( USING( DEVELOPMENT_BUILD ) )
#define ALLOCATOR_STATS USE_IF( USING( ALLOCATOR_HEADER ) ) // requires Header's byte tracking per-allocation
#define ALLOCATOR_LEAK_CHECK USE_IF( USING( ALLOCATOR_HEADER ) ) // requires Header's byte tracking per-allocation
namespace carb
{
namespace flatcache
{
inline const char* humanReadableSize( const uint64_t bytes ) noexcept
{
auto va = [](auto ...params) -> const char* {
static char tmp[1024];
#ifdef _WIN32
_snprintf_s(tmp, sizeof(tmp), params...);
#else
snprintf(tmp, sizeof(tmp), params...);
#endif
return (const char*)&tmp;
};
constexpr const char SIZE_UNITS[64][3]{
" B", " B", " B", " B", " B", " B", " B", " B", " B", " B",
"KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB",
"MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB",
"GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB",
"TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB",
"PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB",
"EB", "EB", "EB", "EB"
};
static constexpr size_t B = 1ull;
static constexpr size_t KB = 1024ull;
static constexpr size_t MB = (1024ull*1024ull);
static constexpr size_t GB = (1024ull*1024ull*1024ull);
static constexpr size_t TB = (1024ull*1024ull*1024ull*1024ull);
static constexpr size_t PB = (1024ull*1024ull*1024ull*1024ull*1024ull);
static constexpr size_t EB = (1024ull*1024ull*1024ull*1024ull*1024ull*1024ull);
constexpr const size_t SIZE_BASE[64]{
B, B, B, B, B, B, B, B, B, B,
KB, KB, KB, KB, KB, KB, KB, KB, KB, KB,
MB, MB, MB, MB, MB, MB, MB, MB, MB, MB,
GB, GB, GB, GB, GB, GB, GB, GB, GB, GB,
TB, TB, TB, TB, TB, TB, TB, TB, TB, TB,
PB, PB, PB, PB, PB, PB, PB, PB, PB, PB,
EB, EB, EB, EB
};
const uint32_t power = bytes ? ( 64u - clz64( bytes ) ) - 1u : 0;
const char *const units = SIZE_UNITS[power];
const size_t base = SIZE_BASE[power];
const size_t count = bytes / base;
return va("%zu %s", count, units);
}
// A wrapper around malloc/free that aims to:
//
// * Cheaply track allocation counts and bytes, and detect leaks automatically at ~Allocator()
//
// * Cheaply track usage in terms of peak memory usage, and total lifetime usage broken down by size. Sample output:
// dumped to console appears like so:
//
// == Allocator 0x000000E67BEFCEA0 Stats ==
// allocCount: 0
// allocBytes: 0 B
// peakAllocCount: 4002
// peakAllocBytes: 4 GB
// minAllocBytes: 312 B
// maxAllocBytes: 6 MB
//
// Lifetime Allocation Histogram:
// Normalized over TOTAL allocations: 13956
// < 512 B|***** 29% 4002
// < 1 KB| 0% 0
// < 2 KB| 0% 0
// < 4 KB| 0% 0
// < 8 KB|*** 14% 2000
// < 16 KB| 0% 0
// < 32 KB|*** 14% 1994
// < 64 KB| 0% 0
// < 128 KB|*** 14% 1976
// < 256 KB| 0% 0
// < 512 KB|*** 14% 1904
// < 1 MB| 0% 0
// < 2 MB|** 12% 1616
// < 4 MB| 0% 0
// < 8 MB|* 3% 464
// ========================
struct Allocator
{
Allocator();
~Allocator();
void* alloc(const size_t bytes);
void free(void *const ptr);
template<typename T, typename ...Params>
T* new_(Params&& ...params);
template<typename T>
void delete_(T*const t);
void resetStats() noexcept;
void reportUsage() noexcept;
bool checkLeaks() noexcept;
private:
#if USING( ALLOCATOR_HEADER )
struct BlockHeader
{
size_t bytes;
};
#endif // #if USING( ALLOCATOR_HEADER )
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
size_t allocCount;
size_t allocBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
#if USING( ALLOCATOR_STATS )
size_t peakAllocCount;
size_t peakAllocBytes;
size_t minAllocBytes;
size_t maxAllocBytes;
static constexpr size_t ALLOC_BUCKET_COUNT = 65;
size_t lifetimeAllocCount;
size_t lifetimeAllocBuckets[ALLOC_BUCKET_COUNT];
#endif // #if USING( ALLOCATOR_STATS )
};
struct AllocFunctor
{
Allocator *allocator;
void* operator()(const size_t bytes)
{
CARB_ASSERT(allocator);
return allocator->alloc(bytes);
}
};
struct FreeFunctor
{
Allocator *allocator;
void operator()(void *const ptr)
{
CARB_ASSERT(allocator);
return allocator->free(ptr);
}
};
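// Illustrative usage sketch (not part of the original header):
//
//     Allocator allocator;
//     void* block = allocator.alloc(256);
//     allocator.free(block);
//
//     struct Foo { int x; explicit Foo(int v) : x(v) {} };
//     Foo* foo = allocator.new_<Foo>(42);
//     allocator.delete_(foo);
//     // ~Allocator() reports usage stats and flags leaks in development builds.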
inline Allocator::Allocator()
{
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
allocCount = 0;
allocBytes = 0;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
resetStats();
}
inline Allocator::~Allocator()
{
checkLeaks();
reportUsage();
}
inline void* Allocator::alloc(const size_t bytes)
{
#if USING( ALLOCATOR_HEADER )
const size_t totalBytes = bytes + sizeof(BlockHeader);
#endif // #if USING( ALLOCATOR_HEADER )
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
CARB_ASSERT((allocCount + 1) > allocCount);
CARB_ASSERT((allocBytes + totalBytes) > allocBytes);
++allocCount;
allocBytes += totalBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
#if USING( ALLOCATOR_STATS )
if ( allocBytes > peakAllocBytes )
{
peakAllocBytes = allocBytes;
peakAllocCount = allocCount;
}
if ( totalBytes < minAllocBytes )
{
minAllocBytes = totalBytes;
}
if ( totalBytes > maxAllocBytes )
{
maxAllocBytes = totalBytes;
}
const uint32_t bucket = ( 64u - clz64( totalBytes - 1ull ) );
CARB_ASSERT(lifetimeAllocBuckets[bucket] + 1 > lifetimeAllocBuckets[bucket]);
++lifetimeAllocBuckets[bucket];
++lifetimeAllocCount;
#endif // #if USING( ALLOCATOR_STATS )
#if USING( ALLOCATOR_HEADER )
BlockHeader *const header = (BlockHeader*)malloc(totalBytes);
CARB_ASSERT(header);
header->bytes = totalBytes;
return header+1;
#else // #if USING( ALLOCATOR_HEADER )
return malloc(bytes);
#endif // #if USING( ALLOCATOR_HEADER )
}
inline void Allocator::free(void *const ptr)
{
#if USING( ALLOCATOR_HEADER )
CARB_ASSERT(ptr);
BlockHeader *header = (BlockHeader*)ptr;
--header;
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
const size_t totalBytes = header->bytes;
CARB_ASSERT((allocCount - 1) < allocCount);
CARB_ASSERT((allocBytes - totalBytes) < allocBytes);
--allocCount;
allocBytes -= totalBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
::free(header);
#else // #if USING( ALLOCATOR_HEADER )
    ::free(ptr);
#endif // #if USING( ALLOCATOR_HEADER )
}
template<typename T, typename ...Params>
inline T* Allocator::new_(Params&& ...params)
{
T *const t = (T*)Allocator::alloc(sizeof(T));
new (t) T(std::forward<Params>(params)...);
return t;
}
template<typename T>
inline void Allocator::delete_(T*const t)
{
CARB_ASSERT(t);
t->~T();
#if USING( ALLOCATOR_HEADER )
BlockHeader *header = (BlockHeader*)t;
header--;
CARB_ASSERT(header->bytes == (sizeof(BlockHeader) + sizeof(T)));
#endif // #if USING( ALLOCATOR_HEADER )
Allocator::free(t);
}
inline void Allocator::resetStats() noexcept
{
#if USING( ALLOCATOR_STATS )
peakAllocCount = 0;
peakAllocBytes = 0;
minAllocBytes = SIZE_MAX;
maxAllocBytes = 0;
lifetimeAllocCount = 0;
for ( size_t i = 0; i < ALLOC_BUCKET_COUNT; ++i )
{
lifetimeAllocBuckets[i] = 0;
}
#endif // #if USING( ALLOCATOR_STATS )
}
inline void Allocator::reportUsage() noexcept
{
#if USING( ALLOCATOR_STATS )
CARB_LOG_INFO("== Allocator 0x%p Stats ==", this);
if (!lifetimeAllocCount)
{
CARB_LOG_INFO("<no stats to report; unused allocator>");
CARB_LOG_INFO("========================");
return;
}
CARB_LOG_INFO("allocCount: %12zu", allocCount);
CARB_LOG_INFO("allocBytes: %15s", humanReadableSize(allocBytes));
CARB_LOG_INFO("peakAllocCount: %12zu", peakAllocCount);
CARB_LOG_INFO("peakAllocBytes: %15s", humanReadableSize(peakAllocBytes));
CARB_LOG_INFO("minAllocBytes: %15s", humanReadableSize(minAllocBytes));
CARB_LOG_INFO("maxAllocBytes: %15s", humanReadableSize(maxAllocBytes));
CARB_LOG_INFO("");
CARB_LOG_INFO("Lifetime Allocation Histogram:");
size_t begin = 0;
for ( ; begin < ALLOC_BUCKET_COUNT; ++begin )
{
if ( lifetimeAllocBuckets[begin] )
{
break;
}
}
size_t end = 0;
for ( ; end < ALLOC_BUCKET_COUNT; ++end )
{
if ( lifetimeAllocBuckets[ALLOC_BUCKET_COUNT - end - 1] )
{
end = ALLOC_BUCKET_COUNT - end;
break;
}
}
CARB_LOG_INFO(" Normalized over TOTAL allocations: %zu", lifetimeAllocCount);
size_t i;
float normalized[ALLOC_BUCKET_COUNT];
for ( i = begin; i < end; ++i )
{
normalized[i] = (float)lifetimeAllocBuckets[i] / (float)lifetimeAllocCount;
}
constexpr size_t WIDTH = 16;
for ( i = begin; i < end; ++i )
{
char buf[WIDTH+1] = {};
const size_t w = ( size_t )std::ceil(WIDTH * normalized[i]);
for( size_t j = 0; j < w; ++j)
{
buf[j] = '*';
}
static_assert(WIDTH == 16, "Fix CARB_LOG_INFO below");
CARB_LOG_INFO(" <%7s|%-16s %3.0f%% %12zu", humanReadableSize(1ull<<i), buf, (normalized[i] * 100.f), lifetimeAllocBuckets[i]);
}
CARB_LOG_INFO("========================");
#endif // #if USING( ALLOCATOR_STATS )
}
inline bool Allocator::checkLeaks() noexcept
{
#if USING( ALLOCATOR_LEAK_CHECK )
if (allocCount || allocBytes)
{
CARB_LOG_ERROR("PathToAttributesMap detected a memory leak of %s!\n", humanReadableSize(allocBytes));
CARB_ASSERT(false, "PathToAttributesMap detected a memory leak of %s!\n", humanReadableSize(allocBytes));
return true;
}
#endif // #if USING( ALLOCATOR_LEAK_CHECK )
return false;
}
} // namespace flatcache
} // namespace carb
| 10,798 | C | 28.425068 | 135 | 0.571217 |
omniverse-code/kit/fabric/include/carb/flatcache/InterpolationUsd.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// This include must come first
// clang-format off
#include "UsdPCH.h"
// clang-format on
#include <pxr/base/gf/matrix4d.h>
#include <pxr/base/gf/quatf.h>
#include "carb/logging/Log.h"
/**
* @brief Defined in a separate location to the other lerp functions
* in order to avoid breaking C-ABI compatibility
*/
namespace carb
{
namespace flatcache
{
/**
* @brief Spherical interpolation specialization relying on pxr native
* interpolation for quaternions
*/
template <>
inline const pxr::GfQuatf interpolate(const pxr::GfQuatf& q0, const pxr::GfQuatf& q1, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("InterpolationUsd interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
pxr::GfQuatf result = pxr::GfSlerp(theta, q0, q1);
return result;
}
/**
* @brief pxr::Matrix4d interpolation specialization Used in Kit by OmniHydraDelegate
*/
template <>
inline const pxr::GfMatrix4d interpolate(const pxr::GfMatrix4d& m0, const pxr::GfMatrix4d& m1, float theta)
{
if (theta < 0.0f || theta > 1.0f)
{
CARB_LOG_WARN_ONCE("InterpolationUsd interpolate(): theta %f outside range [0.0, 1.0]", theta);
}
pxr::GfMatrix4d r0, r1; // rotations, where -r is inverse of r
pxr::GfVec3d s0, s1; // scale
pxr::GfMatrix4d u0, u1; // rotations, may contain shear info
pxr::GfVec3d t0, t1; // translations
pxr::GfMatrix4d p0, p1; // p is never modified; can contain projection info
// Account for rotation, translation, scale
// (order is mat = r * s * -r * u * t), eps=1e-10 used to avoid zero values
m0.Factor(&r0, &s0, &u0, &t0, &p0);
m1.Factor(&r1, &s1, &u1, &t1, &p1);
// Interpolate component-wise
pxr::GfVec3d tResult = pxr::GfLerp(theta, t0, t1);
pxr::GfVec3d sResult = pxr::GfLerp(theta, s0, s1);
pxr::GfQuatd rResult = pxr::GfSlerp(u0.ExtractRotationQuat(), u1.ExtractRotationQuat(), theta);
pxr::GfMatrix4d result = pxr::GfMatrix4d(pxr::GfRotation(rResult), pxr::GfCompMult(sResult, tResult));
return result;
}
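// --- Usage sketch (illustrative, not part of the original header) ---
// With the primary interpolate() template in scope, the specializations above
// are selected by ordinary calls; the values here are hypothetical:
//
//     pxr::GfQuatf a = pxr::GfQuatf::GetIdentity();
//     pxr::GfQuatf b(0.0f, pxr::GfVec3f(1.0f, 0.0f, 0.0f)); // 180-degree turn about X
//     pxr::GfQuatf mid = interpolate(a, b, 0.5f);           // slerps halfway between a and b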
}
}
omniverse-code/kit/fabric/include/carb/flatcache/RationalTime.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <map>
#include <stdint.h>
namespace carb
{
namespace flatcache
{
// Each frame in the history buffer is timestamped with a frame time, stored as
// a rational number to minimize rounding issues. See threadgate::TimeRatio.
struct RationalTime
{
int64_t numerator;
uint64_t denominator;
    // Minimize the denominator by dividing by gcd(numerator, denominator)
RationalTime reduce() const
{
RationalTime result{0, 0};
int64_t gcdNumDen = gcd(numerator, denominator);
if (gcdNumDen != 0)
{
result.numerator = numerator / gcdNumDen;
result.denominator = denominator / gcdNumDen;
}
return result;
}
bool operator==(RationalTime rhs) const
{
RationalTime thisReduced = reduce();
RationalTime rhsReduced = rhs.reduce();
return (thisReduced.numerator == rhsReduced.numerator) && (thisReduced.denominator == rhsReduced.denominator);
}
bool operator!=(RationalTime rhs) const
{
return !(*this == rhs);
}
static int64_t gcd(int64_t a, int64_t b)
{
while (b != 0)
{
int64_t t = b;
b = a % b;
a = t;
}
return std::max(a, -a);
}
RationalTime operator-(RationalTime b) const
{
RationalTime result;
result.numerator = numerator * int64_t(b.denominator) - b.numerator * int64_t(denominator);
result.denominator = denominator * b.denominator;
return result.reduce();
}
RationalTime operator*(int64_t b) const
{
RationalTime result;
result.numerator = numerator * b;
result.denominator = denominator;
return result.reduce();
}
};
static const RationalTime kInvalidTime = { 0, 0 };
} // namespace flatcache
} // namespace carb
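// --- Usage sketch (illustrative, not part of the original header) ---
// Arithmetic reduces results to lowest terms, so comparisons are value-based:
//
//     carb::flatcache::RationalTime half{ 1, 2 };        // 1/2
//     carb::flatcache::RationalTime sixth{ 1, 6 };       // 1/6
//     carb::flatcache::RationalTime diff = half - sixth; // (1*6 - 1*2)/12, reduced to {1, 3}
//     bool same = (diff == carb::flatcache::RationalTime{ 2, 6 }); // true: both reduce to 1/3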
omniverse-code/kit/fabric/include/carb/flatcache/ApiLogger.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/IToken.h>
#include <iostream>
// To log all FlatCache methods that access a particular path and attribute,
// set the following three defines
#define ENABLE_FLATCACHE_API_LOG 0
#if ENABLE_FLATCACHE_API_LOG
#define attrToTrace "attrToLog"
#define pathToTrace "/primToLog"
namespace carb
{
namespace flatcache
{
struct ApiLogger
{
bool& enabled;
const char* desc;
ApiLogger(const char* desc, bool& enabled, const TokenC& attrNameC) : desc(desc), enabled(enabled)
{
Token attrName(attrNameC);
if (attrName == Token(attrToTrace))
{
std::cout << "begin " << desc << "\n";
enabled = true;
}
}
ApiLogger(const char* desc, bool& enabled, const PathC& pathC, const TokenC& attrNameC) : desc(desc), enabled(enabled)
{
Path path(pathC);
Token attrName(attrNameC);
if (path == Path(pathToTrace) && attrName == Token(attrToTrace))
{
std::cout << "begin " << desc << "\n";
enabled = true;
}
}
~ApiLogger()
{
if (enabled)
{
std::cout << "end " << desc << "\n";
}
enabled = false;
}
};
#define APILOGGER(...) ApiLogger logger(__VA_ARGS__)
}
}
#else
#define APILOGGER(...)
#endif
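// --- Usage sketch (illustrative, not part of the original header) ---
// With ENABLE_FLATCACHE_API_LOG set to 1, a method can trace accesses that
// match pathToTrace/attrToTrace; the names below are hypothetical:
//
//     void readAttr(const carb::flatcache::PathC& path, const carb::flatcache::TokenC& attr)
//     {
//         static bool tracing = false;                // ApiLogger keeps a reference to this flag
//         APILOGGER("readAttr", tracing, path, attr); // prints "begin/end readAttr" on a match
//         // ... attribute access ...
//     }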
omniverse-code/kit/fabric/include/carb/flatcache/underlying.h
#pragma once
#include <type_traits>
namespace carb {
namespace flatcache {
template <typename EnumT>
constexpr inline typename std::underlying_type<EnumT>::type underlying(const EnumT& t)
{
return static_cast<typename std::underlying_type<EnumT>::type>(t);
}
} // namespace flatcache
} // namespace carb
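// --- Usage sketch (illustrative, not part of the original header) ---
//     enum class Color : uint8_t { Red = 2 };
//     static_assert(carb::flatcache::underlying(Color::Red) == 2,
//                   "underlying() maps an enum value to its underlying integer type");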
omniverse-code/kit/fabric/include/carb/flatcache/Ordered_Set.h
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <vector>
namespace carb
{
namespace flatcache
{
template <class T, class Compare = std::less<T>>
struct set
{
using value_type = T;
std::vector<T> v;
Compare cmp;
    using iterator = typename std::vector<T>::iterator;
    using const_iterator = typename std::vector<T>::const_iterator;
iterator begin()
{
return v.begin();
}
iterator end()
{
return v.end();
}
const_iterator begin() const
{
return v.begin();
}
const_iterator end() const
{
return v.end();
}
set(const Compare& c = Compare()) : v(), cmp(c)
{
}
template <class InputIterator>
set(InputIterator first, InputIterator last, const Compare& c = Compare()) : v(first, last), cmp(c)
{
std::sort(begin(), end(), cmp);
}
set(std::initializer_list<T> _Ilist) : set(_Ilist.begin(), _Ilist.end())
{
}
void reserve(size_t newCapacity)
{
v.reserve(newCapacity);
}
void clear()
{
v.clear();
}
iterator insert(const T& t)
{
iterator i = std::lower_bound(begin(), end(), t, cmp);
if (i == end() || cmp(t, *i))
i = v.insert(i, t);
return i;
}
iterator insert(T&& t)
{
iterator i = std::lower_bound(begin(), end(), t, cmp);
if (i == end() || cmp(t, *i))
i = v.insert(i, std::move(t));
return i;
}
template <class _Iter>
void insert(_Iter _First, _Iter _Last)
{ // insert [_First, _Last) one at a time
for (; _First != _Last; ++_First)
{
insert(*_First);
}
}
iterator insert(const_iterator hint, const value_type& value)
{
// Measurements show it is faster to ignore hint in this application
return insert(value);
}
void insert(std::initializer_list<T> _Ilist)
{
insert(_Ilist.begin(), _Ilist.end());
}
size_t erase(const T& key)
{
iterator removeElement = find(key);
if (removeElement != v.end())
{
v.erase(removeElement);
return 1;
}
else
{
return 0;
}
}
iterator erase(iterator iter)
{
return v.erase(iter);
}
const_iterator find(const T& t) const
{
const_iterator i = std::lower_bound(begin(), end(), t, cmp);
return i == end() || cmp(t, *i) ? end() : i;
}
iterator find(const T& t)
{
iterator i = std::lower_bound(begin(), end(), t, cmp);
return i == end() || cmp(t, *i) ? end() : i;
}
bool contains(const T& t) const
{
const_iterator i = std::lower_bound(begin(), end(), t, cmp);
return i != end() && !cmp(t, *i);
}
bool operator==(const set<T>& other) const
{
return v == other.v;
}
bool operator!=(const set<T>& other) const
{
return v != other.v;
}
size_t size() const
{
return v.size();
}
T* data()
{
return v.data();
}
const T* data() const
{
return v.data();
}
};
template <class T, class Compare = std::less<T>>
bool operator<(const set<T, Compare>& left, const set<T, Compare>& right)
{
return left.v < right.v;
}
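// --- Usage sketch (illustrative, not part of the original header) ---
// flatcache::set keeps its backing vector sorted and unique on insert:
//
//     carb::flatcache::set<int> s;
//     s.insert(3);
//     s.insert(1);
//     s.insert(3); // duplicate: ignored
//     // s.v is now {1, 3}; find()/contains() use binary search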
template<typename T>
flatcache::set<T> nWayUnion(std::vector<flatcache::set<T>>& srcBuckets)
{
flatcache::set<T> retval;
// Calculate the maximum number of destination attributes
// We could instead calculate it exactly by finding union of attribute names
size_t maxDestAttrCount = 0;
for (flatcache::set<T>& srcBucket : srcBuckets)
{
maxDestAttrCount += srcBucket.size();
}
retval.reserve(maxDestAttrCount);
auto currentDest = std::back_inserter(retval.v);
size_t bucketCount = srcBuckets.size();
// Initialize invariant that nonEmpty is the vector of buckets that have
// non-zero attribute counts
struct NonEmptySegment
{
// Invariant is current!=end
typename std::vector<T>::iterator current;
typename std::vector<T>::iterator end;
};
std::vector<NonEmptySegment> nonEmpty;
nonEmpty.reserve(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
if (srcBuckets[i].begin() != srcBuckets[i].end())
{
nonEmpty.push_back({ srcBuckets[i].begin(), srcBuckets[i].end() });
}
}
// Keep going until there's only 1 non-empty bucket
// At that point we can just copy its attributes to the output
while (1 < nonEmpty.size())
{
// Find all the buckets that have the minimum element
// These are the ones whose iterators will get advanced
// By the loop guard and the invariant, we know that nonEmpty[0] exists
// and that nonEmpty[0].current!=nonEmpty[0].end.
// So *nonEmpty[0].current is a safe dereference
T minSoFar = *nonEmpty[0].current;
std::vector<size_t> indicesAtMin;
indicesAtMin.reserve(nonEmpty.size());
indicesAtMin.push_back(0);
for (size_t i = 1; i != nonEmpty.size(); i++)
{
if (*nonEmpty[i].current < minSoFar)
{
minSoFar = *nonEmpty[i].current;
indicesAtMin = { i };
}
else if (*nonEmpty[i].current == minSoFar)
{
indicesAtMin.push_back(i);
}
}
// Copy minimum element to the output
*currentDest = minSoFar;
++(*currentDest);
// Advance the iterators that pointed to the min
std::vector<NonEmptySegment> tempNonEmpty;
tempNonEmpty.reserve(indicesAtMin.size());
for (size_t i = 0; i != indicesAtMin.size(); i++)
{
nonEmpty[indicesAtMin[i]].current++;
}
// Maintain the invariant that nonEmpty are the non empty ones
// Replace with O(n) copy into a temporary if necessary
auto it = nonEmpty.begin();
while (it != nonEmpty.end())
{
if (it->current == it->end)
{
it = nonEmpty.erase(it);
}
else
{
++it;
}
}
}
// By the negation of the guard we know that nonEmpty has zero or one elements
if (nonEmpty.size() == 1)
{
// If one bucket is left, copy its elements to the output
std::copy(nonEmpty[0].current, nonEmpty[0].end, currentDest);
}
return retval;
}
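// --- Usage sketch (illustrative, not part of the original header) ---
//     std::vector<carb::flatcache::set<int>> buckets = { { 1, 3 }, { 2, 3, 4 } };
//     carb::flatcache::set<int> merged = nWayUnion(buckets); // merged holds {1, 2, 3, 4}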
}
}
omniverse-code/kit/fabric/include/carb/flatcache/StageWithHistory.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "IFlatcache.h"
#include <carb/Framework.h>
#include <carb/Interface.h>
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/PrimChanges.h>
#include <carb/logging/Log.h>
#include <gsl/span>
#include <carb/flatcache/Type.h>
#include <map>
#include <stdint.h>
#include "carb/cpp17/Optional.h"
using carb::cpp17::optional;
namespace carb
{
namespace flatcache
{
// The comments in this file are intended to be read top to bottom, like a book.
// This file defines flatcache::StageWithHistory, an instance of which stores
// the current stage and a configurable number of frames of state history. It
// is thread safe, in the sense that the current state can be safely read and
// written in parallel with multiple threads reading the history. The class
// definition is towards the end of this file. We first define some basic types,
// a read/write accessor class for use by the main/game/sim thread, and two
// read-only accessor classes for use on render threads. These classes provide
// access to an interpolated state and a window of state history respectively.
// To specify paths, attribute names, and attribute types we use
// flatcache::Path, flatcache::Token and graph::Type types, rather than
// USD's SdfPath, TfToken and TfType. This allows us to access the stage and
// history without including USD headers.
// The main class in this file is StageWithHistory, which is defined towards
// the end of the file.
class StageWithHistory;
template<typename T>
class AttributeArrayResult;
/**
* @invariant arrayBytes.size() must be a multiple of bytesPerElement
*/
class ConstArrayAsBytes
{
public:
gsl::span<const gsl::byte> arrayBytes;
size_t bytesPerElement;
Type elementType;
};
// findPrims() returns a list of buckets of prims, represented by PrimBucketList.
class PrimBucketList
{
friend class StageAtTimeInterval;
friend class StageInProgress;
protected:
PrimBucketListId m_primBucketListId;
static carb::flatcache::IPrimBucketList* sIPrimBucketList();
PrimBucketList(PrimBucketListId id) : m_primBucketListId(id)
{
}
public:
    // PrimBucketList is opaque; you have to use the getAttributeArray methods
// of StageInProgress, StageAtTime or StageAtTimeInterval to read the
// attributes of its elements.
size_t bucketCount() const;
size_t size() const;
void print() const;
PrimBucketListId getId() const
{
return m_primBucketListId;
}
~PrimBucketList();
};
// ChangedPrimBucketList is a PrimBucketList that has changes stored for a
// particular listener. It is returned by StageInProgress::getChanges().
class ChangedPrimBucketList : public PrimBucketList
{
ChangedPrimBucketList(PrimBucketListId id) : PrimBucketList(id) {}
friend class StageInProgress;
public:
BucketChanges getChanges(size_t index);
AddedPrimIndices getAddedPrims(size_t index);
};
// The main/game/sim thread uses the following class to read and write the
// state at the current frame.
//
// StageInProgress can either be used RAII style, where you construct it from a frameNumber,
// or non-RAII style, where you construct it from an existing stageInProgressId.
class StageInProgress
{
StageInProgressId m_stageInProgress;
bool m_createdFromId;
    UsdStageId m_usdStageId; // Only valid if m_createdFromId == false
public:
// The constructor creates a new frame and locks it for read/write
StageInProgress(StageWithHistory& stageWithHistory, size_t simFrameNumber);
// Create from an already locked frame
StageInProgress(StageInProgressId stageInProgressId);
// Returns the frame number allocated by constructor
size_t getFrameNumber();
// Returns the frame time allocated by constructor
RationalTime getFrameTime();
// Returns which mirrored array is valid: CPU, GPU, etc.
ValidMirrors getAttributeValidBits(const Path& path, const Token& attrName) const;
// getAttribute returns a read/write pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttribute(const Path& path, const Token& attrName);
// getAttribute returns a read-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
const T* getAttributeRd(const Path& path, const Token& attrName);
// getAttribute returns a write-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttributeWr(const Path& path, const Token& attrName);
// getAttribute returns a read/write pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttributeGpu(const Path& path, const Token& attrName);
// getAttribute returns a read-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
const T* getAttributeRdGpu(const Path& path, const Token& attrName);
// getAttribute returns a write-only pointer to a non-array attribute
// If it returns nullptr then the attribute doesn't exist in the stage
template <typename T>
T* getAttributeWrGpu(const Path& path, const Token& attrName);
// getOrCreateAttributeWr returns a write-only pointer to a non-array
// attribute. If the attribute doesn't exist, then it will create it.
// The return type is a reference rather than a pointer because the
// attribute is guaranteed to exist on exit
template <typename T>
T& getOrCreateAttributeWr(const Path& path, const Token& attrName, Type type);
// getAttribute returns a read/write span of an array attribute
// The span allows the array size to be read, but not written
// To set the array size, use setArrayAttributeSize
template <typename T>
gsl::span<T> getArrayAttribute(const Path& path, const Token& attrName);
// getAttributeRd returns a read-only span of an array attribute
// The array size is also read only
template <typename T>
gsl::span<const T> getArrayAttributeRd(const Path& path, const Token& attrName);
// getAttributeRd returns a write-only span of an array attribute
// The array size is read only, to resize use setArrayAttributeSize
template <typename T>
gsl::span<T> getArrayAttributeWr(const Path& path, const Token& attrName);
// Get the size of an array attribute. When writing CPU code, it isn't
// normally necessary to use this method, as getArrayAttribute returns a
// span containing the data pointer and the size.
// However, when writing mixed CPU/GPU code it is wasteful to copy the
// array data from GPU to CPU when just the size is required, so use this
// method in that case.
size_t getArrayAttributeSize(const Path& path, const Token& attrName);
// Set the size of an array attribute
void setArrayAttributeSize(const Path& path, const Token& attrName, size_t elemCount);
template <typename T>
gsl::span<T> setArrayAttributeSizeAndGet(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
size_t indexInBucket,
const Token& attrName,
size_t newElemCount);
// createPrim, destroyPrim, createAttribute and destroyAttribute do what
// you'd expect
void createPrim(const Path& path);
void destroyPrim(const Path& path);
void createAttribute(const Path& path, const Token& attrName, Type type);
template<int n>
void createAttributes(const Path& path, std::array<AttrNameAndType, n> attributes);
// Deprecated: type argument is not used.
void destroyAttribute(const Path& path, const Token& attrName, Type type);
void destroyAttribute(const Path& path, const Token& attrName);
template <int n>
void destroyAttributes(const Path& path, const std::array<Token, n>& attributes);
void destroyAttributes(const Path& path, const std::vector<Token>& attributes);
// findPrims() finds prims that have all the attributes in "all", and any
// of the attributes in "any", and none of the attributes in "none".
// The attributes of the resulting prims can be accessed as piecewise
// contiguous arrays, using getAttributeArray() below, which is typically
// faster than calling getAttribute for each prim.
PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any = {},
const carb::flatcache::set<AttrNameAndType>& none = {});
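    // e.g. (illustrative sketch; "hasMass" and "hasVelocity" are AttrNameAndType
    // filters built by the caller, "stage" is this StageInProgress):
    //
    //     PrimBucketList buckets = stage.findPrims({ hasMass, hasVelocity });
    //     for (size_t b = 0; b != buckets.bucketCount(); ++b)
    //     {
    //         gsl::span<const Path> paths  = stage.getPathArray(buckets, b);
    //         gsl::span<float>      masses = stage.getAttributeArray<float>(buckets, b, Token("mass"));
    //     }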
/**
* Tell a listener to log changes for an attribute.
* Attaches listener to stage if not already attached
*
* @param[in] attrName The attribute's name
* @param[in] listenerId The listener
*/
void attributeEnableChangeTracking(const Token& attrName, ListenerId listenerId);
/**
* Tell a listener to stop logging changes for an attribute.
* Attaches listener to stage if not already attached
*
* @param[in] attrName The attribute's name
* @param[in] listenerId The listener
*/
void attributeDisableChangeTracking(const Token& attrName, ListenerId listenerId);
/**
* Tell a listener to log prim creates
* Attaches listener to stage if not already attached
*
* @param[in] attrName The attribute's name
* @param[in] listenerId The listener
*/
void enablePrimCreateTracking(ListenerId listenerId);
/**
* Pause change tracking.
*
* @param[in] listenerId The listener to pause
*/
void pauseChangeTracking(ListenerId listenerId);
/**
* Resume change tracking.
*
* @param[in] listenerId The listener to resume
*/
void resumeChangeTracking(ListenerId listenerId);
/**
* Is change tracking paused?
*
* @param[in] listenerId The listener
* @return Whether the listener is paused
*/
bool isChangeTrackingPaused(ListenerId listenerId);
/**
* Get changes
*
* @param[in] listenerId The listener
     * @return The changes that occurred since the last time the listener was popped
*/
ChangedPrimBucketList getChanges(ListenerId listenerId);
/**
* Clear the list of changes
*
* @param[in] listenerId The listener
*/
void popChanges(ListenerId listenerId);
/**
* Get the number of listeners
*
* @return The number of listeners listening to this stage
*/
size_t getListenerCount();
/**
* Is the listener attached to this stage
*
* @return Whether the listener is attached to this stage
*/
bool isListenerAttached(ListenerId listenerId);
/**
* Detach the listener from the stage. Future changes will not be logged for this listener.
*
* @param[in] listenerId The listener
* @return Whether the listener is attached to this stage
*/
void detachListener(ListenerId listenerId);
// getAttributeArray(primBucketList, index, attrName) returns a read/write
// contiguous array of the values of attribute "attrName" for each prim of
// bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
template <typename T>
gsl::span<T> getAttributeArray(const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName);
template <typename T>
gsl::span<const T> getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
gsl::span<T> getAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName);
template <typename T>
gsl::span<T> getAttributeArrayGpu(const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName);
template <typename T>
gsl::span<const T> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
gsl::span<T> getAttributeArrayWrGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName);
template <typename T>
gsl::span<T> getOrCreateAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName,
Type type);
// getAttributeArray(primBucketList, index, attrName) returns a vector of
// array-valued attributes "attrName" for the prims of bucket "index" of
// "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
// It gives read/write access to the values of each prim's array
template <typename T>
std::vector<gsl::span<T>> getArrayAttributeArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getAttributeArray(primBucketList, index, attrName) returns a vector of
// array-valued attributes "attrName" for the prims of bucket "index" of
// "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
// It gives read-only access to the values of each prim's array
template <typename T>
std::vector<gsl::span<const T>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getAttributeArray(primBucketList, index, attrName) returns a vector of
// array-valued attributes "attrName" for the prims of bucket "index" of
// "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
// It gives write-only access to the values of each prim's array
template <typename T>
std::vector<gsl::span<T>> getArrayAttributeArrayWr(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getPathArray(primBucketList, index) returns a read-only contiguous array
// of the paths of the prims of bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
gsl::span<const Path> getPathArray(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
flatcache::set<AttrNameAndType> getAttributeNamesAndTypes(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const;
// TODO: replace with an iterator for iterating over bucket names
void printBucketNames() const;
// Record that the attribute at path.attrName has been modified. Right now this is
// done explicitly to give a high degree of control over which attributes get
// passed to the notice.
void logAttributeWriteForNotice(const Path& path, const Token& attrName);
// Construct and send a TfNotice with a vector of objects paths
// that have changed, much like the ObjectsChanged notice from USD
void broadcastTfNoticeForAttributesChanged() const;
// Connection API
/**
* @brief Create a connection on the target prim
*
* @param path the target prim on which to create a connection
* @param connectionName specifies the connections attribute name on the prim
* @param connection specifies the target prim and attribute of the connection
*/
void createConnection(const Path& path, const Token& connectionName, const Connection& connection);
/**
* @brief Create an arbitrary number of connections on the target prim
*
* @param path the target prim on which to create a connection
* @param connectionNames a span of attribute names. Must match the size of the connections vector
* @param connections a span of connections. Must match the size of the connectionNames vector
*/
void createConnections(const Path& path, const gsl::span<Token>& connectionNames, const gsl::span<Connection>& connections );
/**
* @brief removes a connection from a prim
*
* @param path the target prim from which to remove a connection
* @param connectionName the name of the connection to remove
*/
void destroyConnection(const Path& path, const Token& connectionName);
/**
     * @brief removes an arbitrary number of connections from a prim
*
* @param path the target prim from which to remove the connections
* @param connectionNames the names of the connections to be removed
*/
void destroyConnections(const Path& path, const gsl::span<Token>& connectionNames);
/**
* @brief Get a R/W pointer to a connection on the target prim
*
* @param path the target prim
* @param connectionName the target connection name
* @return a R/W pointer to the connection
*/
Connection* getConnection(const Path& path, const Token& connectionName);
/**
* @brief Get a read only pointer to a connection on the target prim
*
* @param path the target prim
* @param connectionName the target connection name
* @return a read only pointer to the connection
*/
const Connection* getConnectionRd(const Path& path, const Token& connectionName);
/**
* @brief Get a write only pointer to a connection on the target prim
*
* @param path the target prim
* @param connectionName the target connection name
* @return a write only pointer to the connection
*/
Connection* getConnectionWr(const Path& path, const Token& connectionName);
/**
* @brief Copy all attributes from the source prim to the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
*
* @param[in] srcPath the source prim
* @param[in] dstPath the destination prim
*/
void copyAttributes(const Path& srcPath, const Path& dstPath);
/**
* @brief Copy the specified attributes from the source prim to the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
*
* @param[in] srcPath the source prim
* @param[in] srcAttrs a span of attributes to be copied.
* @param[in] dstPath the destination prim
*/
void copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath);
/**
     * @brief Copy the specified attributes from the source prim to the specified
* attributes on the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
* Note: The srcAttrs and dstAttrs must be the same size as the function assumes
* that the copy is 1 to 1 in terms of name alignment
*
* @param[in] srcPath the source prim
* @param[in] srcAttrs a span of attributes to be copied.
* @param[in] dstPath the destination prim
* @param[in] dstAttrs a span of attributes to be copied.
*/
void copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath, const gsl::span<Token>& dstAttrs);
StageInProgressId getId() const
{
return m_stageInProgress;
}
/**
* @brief Check whether a prim exists at a given path
     * @param[in] path the path to check
* @return true if a prim exists at the path
*/
bool primExists(const Path& path);
// If StageInProgress was created from an Id, then do nothing
// Else unlock the current sim frame, allowing it to be read by
// other threads
~StageInProgress();
};
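// --- Usage sketch (illustrative; "swh" is an existing StageWithHistory) ---
// {
//     StageInProgress frame(swh, /*simFrameNumber=*/42); // locks frame 42 for read/write
//     float* v = frame.getAttribute<float>(Path("/World/Prim"), Token("myAttr"));
//     if (v)              // nullptr means the attribute is not present in the cache
//         *v += 1.0f;
// } // destructor unlocks the frame so reader threads can see it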
// The following two classes, StageAtTime and StageAtTimeInterval
// are used by reader threads to read the history. StageAtTime is
// used when the state of a stage is needed at a particular point in time.
// StageAtTimeInterval is used when we need all the stage history in a given time
// window.
//
// There can be multiple threads reading the history buffer, for example
// multiple sensor renderers running at different rates. We use shared locks
// to allow multiple threads to read the same frame of history.
//
// StageAtTimeInterval takes an RAII approach to locking, constructing one locks
// a range of slots for reading, and destructing unlocks them.
class StageAtTimeInterval
{
StageAtTimeIntervalId m_stageAtTimeInterval;
static carb::flatcache::IStageAtTimeInterval* sIStageAtTimeInterval();
public:
// The constructor locks frames of history
StageAtTimeInterval(StageWithHistory& stageWithHistory,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime = false);
StageAtTimeInterval(StageWithHistoryId stageWithHistoryId,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime = false);
ValidMirrors getAttributeValidBits(const PathC& path, const TokenC& attrName) const;
// Get values of locked elements
template <typename T>
std::vector<const T*> getAttributeRd(const Path& path, const Token& attrName) const;
// Get GPU pointer and size of locked elements
template <typename T>
std::vector<const T*> getAttributeRdGpu(const Path& path, const Token& attrName) const;
// Get the size of an array attribute. When writing CPU code, it isn't
// normally necessary to use this method, as getArrayAttribute returns a
// span containing the data pointer and the size.
// However, when writing mixed CPU/GPU code it is wasteful to copy the
// array data from GPU to CPU when just the size is required, so use this
// method in that case.
std::vector<size_t> getArrayAttributeSize(const Path& path, const Token& attrName) const;
/**
* @brief Get an array-valued attribute for reading from a single prim
*
* @param path The path of the prim
* @param attrName The name of the attribute
*
* @return a vector of array spans, one for each time sample within the current StageAtTimeInterval
*/
template <typename T>
std::vector<gsl::span<const T>> getArrayAttributeRd(const Path& path, const Token& attrName) const;
/**
* @brief Get an array-valued attribute as bytes for reading from a single prim.
* This is useful for converting to VtValue
*
* @param path The path of the prim
* @param attrName The name of the attribute
*
* @return a vector of array spans, one for each time sample within the
* current StageAtTimeInterval
*/
std::vector<ConstArrayAsBytes> getArrayAttributeRawRd(const Path& path, const Token& attrName) const;
// Get timestamps of locked elements
std::vector<RationalTime> getTimestamps() const;
size_t getTimeSampleCount() const;
// findPrims() finds prims that have all the attributes in "all", and any
// of the attributes in "any", and none of the attributes in "none".
// The attributes of the resulting prims can be accessed as piecewise
// contiguous arrays, using getAttributeArray() below, which is typically
// faster than calling getAttribute for each prim.
PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any = {},
const carb::flatcache::set<AttrNameAndType>& none = {});
// getAttributeArray(primBucketList, index, attrName) returns for each
// timesample, a read-only, contiguous array of the values of attribute
// "attrName" for each prim of bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
template <typename T>
std::vector<gsl::span<const T>> getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
std::vector<gsl::span<const T>> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
std::vector<std::vector<gsl::span<const T>>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
/**
* @brief Read a raw byte representation for a given attribute from a given bucket. This is useful for doing things such as batched type conversions.
*
* @param primBucketList the list of buckets
* @param primBucketListIndex the specific bucket to search
* @param attrName the token describing the desired attribute
*
* @return a vector of byte arrays, one for each time sample within the current StageAtTimeInterval
*/
std::vector<gsl::span<const char>> getAttributeArrayRawRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getPathArray(primBucketList, index) returns for each timesample a
// read-only contiguous array of the paths of the prims of bucket "index"
// of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
// The reason a separate path array is returned per sample is that prims
// can be added and deleted from frame to frame, and we need to check which
// prim a sample corresponds to when interpolating.
std::vector<gsl::span<const Path>> getPathArray(const PrimBucketList& primBucketList,
size_t primBucketListIndex) const;
/**
* @brief Get a Connection on a target prim
*
* @param path the target prim
* @param connectionName the connection name
* @return a vector of read only pointers to connections
*/
std::vector<const Connection*> getConnectionRd(const Path& path, const Token& connectionName);
// TODO: replace with an iterator for iterating over bucket names
void printBucketNames() const;
/**
     * @brief write the current data for this stage view to the specified UsdStage;
     * this will write all attributes at the current time
*
* @param usdStageId Valid usdStage in the stage cache
*
* @return none
*/
void exportUsd(UsdStageId usdStageId) const;
// Get the number of attributes for a given bucket.
std::vector<size_t> getAttributeCounts(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
// Get the name and type of each attribute for a given bucket.
std::pair< std::vector<std::vector<Token>>, std::vector<std::vector<Type>>>
getAttributeNamesAndTypes(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
/** @brief Write a cache file to disk at a specified location
*
* @note Many parameters to this function are optional
* @note This currently only writes the first time in the interval
*
     * @file[in]              The location the file is desired to be written to
     * @workingBuffer[in]     [Optional] In order to avoid costly reallocations
     *                        the code will attempt to re-use the memory at the buffer
     *                        location if it is large enough. If the buffer isn't large
     *                        enough, the cost of allocation and re-traversal may be paid
     * @workingBufferSize[in] [Optional] If workingBuffer is non null, then this describes the length
     *                        of the buffer
* @return The amount of data needed to serialize the cache, a return value of 0 indicates an error
*
*/
uint64_t writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const;
    /** @brief Add a ref count to any data backed by the StageAtTimeInterval
*
* @note The ref count will not enforce any behavior currently, but will
* print a warning if backing data is deleted before all ref counts
* are cleared
*
* @return None
*
*/
void addRefCount();
/** @brief Remove a ref count from an existing timeInterval
*
* @return True if ref count was removed successfully, failure conditions may
* include
* (1) StageAtTimeInterval doesn't exist
* (2) RefCount was already 0
*/
bool removeRefCount();
/** @brief Query ref count for a stage at time
*
     * @note A stage at time might be represented by multiple actual data sources;
* in that case we return the largest refcount of all the data sources
*
* @return number of reference counts
*/
unsigned int getRefCount();
// Unlocks elements to allow them to be reused.
~StageAtTimeInterval();
};
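// --- Usage sketch (illustrative; "swh" is an existing StageWithHistory) ---
// {
//     StageAtTimeInterval window(swh, RationalTime{ 0, 60 }, RationalTime{ 5, 60 });
//     std::vector<RationalTime> times = window.getTimestamps(); // one entry per locked sample
// } // destructor unlocks the history slots for reuse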
// StageAtTime is used when the state of a stage is needed at
// a particular point in time, which may or may not be one of the times sampled
// in the history. If it is, then getAttributeRd returns the exact value sampled.
// If not, it linearly interpolates using the two closest samples in the history.
//
// StageAtTime takes an RAII approach to locking, constructing one
// locks one or two frames in the history (depending on whether interpolation
// is needed), and destructing unlocks them.
class StageAtTime
{
// Invariants:
// I0: if sampleTimes.size()==2, m_theta = (m_time - sampleTimes[0]) /
// (sampleTimes[1] - sampleTimes[0])
// where sampleTimes = m_historyWindow.getTimestamps()
//
// In particular, m_theta increases linearly from 0 to 1 as m_time
// increases from sampleTimes[0] to sampleTimes[1]
//
// TODO: do we need to delay conversion from rational number to double?
StageAtTimeInterval m_historyWindow;
RationalTime m_time;
double m_theta;
void initInterpolation()
{
std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
if (sampleTimes.size() == 2)
{
if ((double)sampleTimes[0].denominator == 0.0 || (double)sampleTimes[1].denominator == 0.0)
{
CARB_LOG_WARN_ONCE("StageWithHistory initInterpolation(): cannot divide by a denominator with a value of zero.");
m_theta = 0.0;
}
else
{
double a_t = (double)sampleTimes[0].numerator / (double)sampleTimes[0].denominator;
double b_t = (double)sampleTimes[1].numerator / (double)sampleTimes[1].denominator;
if (a_t == b_t)
m_theta = 0.0;
else
{
double c_t = (double)m_time.numerator / (double)m_time.denominator;
m_theta = (c_t - a_t) / (b_t - a_t);
}
}
}
else if (sampleTimes.size() == 1)
m_theta = 0.0;
}
public:
// Locks one or two history elements for read.
StageAtTime(StageWithHistory& stageWithHistory, RationalTime time)
: m_historyWindow(stageWithHistory, time, time, true), m_time(time)
{
initInterpolation();
}
StageAtTime(StageWithHistoryId stageWithHistoryId, RationalTime time)
: m_historyWindow(stageWithHistoryId, time, time, true), m_time(time)
{
initInterpolation();
}
// Auxiliary method to communicate attributes of types which will not be interpolated
// Supported types: bool, int, uint
// no samples found: return nullopt
// samples found: return pair{value of sample in frame n, value of sample in frame n+1}
template <typename T>
optional<std::pair<optional<T>, optional<T>>> getNonInterpolatableAttributeRd(const Path& path, const Token& attrName) const;
ValidMirrors getAttributeValidBits(const PathC& path, const TokenC& attrName) const;
// Read interpolated elements
template <typename T>
optional<T> getAttributeRd(const Path& path, const Token& attrName) const;
// Read GPU elements (interpolation not supported yet!)
template <typename T>
const T* getAttributeRdGpu(const Path& path, const Token& attrName) const;
// Get array attribute size, useful for GPU attributes
size_t getArrayAttributeSize(const Path& path, const Token& attrName) const;
    // Get array attribute for read
template <typename T>
gsl::span<const T> getArrayAttributeRd(const Path& path, const Token& attrName);
// findPrims() finds prims that have all the attributes in "all", and any
// of the attributes in "any", and none of the attributes in "none".
// The attributes of the resulting prims can be accessed as piecewise
// contiguous arrays, using getAttributeArray() below, which is typically
// faster than calling getAttribute for each prim.
PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any = {},
const carb::flatcache::set<AttrNameAndType>& none = {})
{
return m_historyWindow.findPrims(all, any, none);
}
// getAttributeArray(primBucketList, index, attrName) returns a read-only
// contiguous array of the values of attribute "attrName" for each prim of
// bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
template <typename T>
AttributeArrayResult<T> getAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
AttributeArrayResult<T> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
template <typename T>
AttributeArrayResult<std::vector<T>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
/**
* @brief Read a raw byte representation for a given attribute from a given bucket. This is useful for doing things such as batched type conversions.
*
* @param primBucketList the list of buckets
* @param primBucketListIndex the specific bucket to search
* @param attrName the token describing the desired attribute
*
* @return a vector of byte arrays, one for each time sample underlying the current StageAtTime. Note: Does not perform any interpolation.
*/
std::vector<gsl::span<const char>> getAttributeArrayRawRd(const PrimBucketList& primBucketList,
size_t primBucketListIndex,
const Token& attrName) const;
// getPathArray(primBucketList, index) returns a read-only contiguous array
// of the paths of the prims of bucket "index" of "primBucketList".
// "index" must be in the range [0..primBucketList.getBucketCount())
gsl::span<const Path> getPathArray(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
/**
* @brief Get a read only pointer to a connection on a prim
*
* @param path the target prim
* @param connectionName the target connection
     * @return a vector of read-only pointers to connections, one per time sample
*/
std::vector<const Connection*> getConnectionRd(const Path& path, const Token& connectionName);
// TODO: replace with an iterator for iterating over bucket names
void printBucketNames() const;
// Get the number of attributes for a given bucket.
size_t getAttributeCount(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
// Get the name and type of each attribute for a given bucket.
std::pair< std::vector<Token>, std::vector<Type>>
getAttributeNamesAndTypes(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;
// Unlocks elements to allow them to be reused.
~StageAtTime() = default;
/** @brief Write a cache file to disk at a specified location
*
* @note Many parameters to this function are optional
*
     * @file[in]              The location the file is desired to be written to
     * @workingBuffer[in]     [Optional] In order to avoid costly reallocations
     *                        the code will attempt to re-use the memory at the buffer
     *                        location if it is large enough. If the buffer isn't large
     *                        enough, the cost of allocation and re-traversal may be paid
     * @workingBufferSize[in] [Optional] If workingBuffer is non null, then this describes the length
     *                        of the buffer
* @return The amount of data needed to serialize the cache, a return value of 0 indicates an error
*
*/
uint64_t writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const;
/** @brief Add a ref count to any data backed by the StageAtTimeInterval
*
* @note The ref count will not enforce any behavior currently, but will
* print a warning if backing data is deleted before all ref counts
* are cleared
*
* @return None
*
*/
void addRefCount();
/** @brief Remove a ref count from an existing timeInterval
*
* @return True if ref count was removed successfully, failure conditions may
* include
* (1) StageAtTimeInterval doesn't exist
* (2) RefCount was already 0
*/
bool removeRefCount();
/** @brief Query ref count for a stage at time
*
     * @note A stage at time might be represented by multiple actual data sources;
* in that case we return the largest refcount of all the data sources
*
* @return number of reference counts
*/
unsigned int getRefCount();
};
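// --- Usage sketch (illustrative; "swh" is an existing StageWithHistory) ---
//     StageAtTime snapshot(swh, RationalTime{ 3, 60 }); // state at t = 3/60 s
//     optional<float> v = snapshot.getAttributeRd<float>(Path("/World/Prim"), Token("myAttr"));
//     // If 3/60 falls between two history samples, v is linearly interpolated.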
// Finally, here is the main class, StageWithHistory.
class StageWithHistory
{
StageWithHistoryId m_stageWithHistory;
UsdStageId m_usdStageId;
friend class StageInProgress;
friend class StageAtTimeInterval;
public:
StageWithHistory(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda=false);
~StageWithHistory();
/**
* Create a listener
* This just creates a listener ID, you have to attach it to a stage to use it.
* Note that there is no destroyListener method. To stop using an ID, detach it from all stages it is attached to.
* @return The listener
*/
ListenerId createListener();
};
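// --- Usage sketch (illustrative; "stageId" names a stage already in the stage cache) ---
//     StageWithHistory swh(stageId, /*historyFrameCount=*/4, RationalTime{ 1, 60 });
//     ListenerId listener = swh.createListener(); // attach via StageInProgress to track changes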
const ListenerId kInvalidListenerId = { 0 };
} // namespace flatcache
} // namespace carb
// Implement above C++ methods by calling C-ABI interfaces
#include "WrapperImpl.h"
omniverse-code/kit/fabric/include/carb/flatcache/USDValueAccessors.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "UsdPCH.h"
#include <carb/Defines.h>
#include <carb/InterfaceUtils.h>
#include <carb/Types.h>
#include <carb/flatcache/FlatCacheUSD.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/PathToAttributesMap.h>
#include <carb/flatcache/StageWithHistory.h>
#include <iostream>
#include <vector>
namespace carb
{
namespace flatcache
{
// A TfNotice sent with a vector of paths for attribute that
// have changed. Sent by StateInProgress upon request, contains
// the paths of attributes that the StageInProgress has flagged
// as modified during it's lifetime, which is typically one frame.
//
// primPaths and attributeNames are required to the same length,
// the prim and attribute name within that prim whose value
// changed
class AttributeValuesChangedNotice : public pxr::TfNotice
{
public:
AttributeValuesChangedNotice(const std::vector<pxr::SdfPath>& primPaths,
const std::vector<pxr::TfToken>& attributeNames)
: _primPaths(primPaths), _attributeNames(attributeNames)
{
}
~AttributeValuesChangedNotice()
{
}
const std::vector<pxr::SdfPath>& GetPrimPaths() const
{
return _primPaths;
}
const std::vector<pxr::TfToken>& GetAttributeNames() const
{
return _attributeNames;
}
private:
const std::vector<pxr::SdfPath> _primPaths;
const std::vector<pxr::TfToken> _attributeNames;
};
void broadcastTfNoticeForAttributesChanged(StageInProgressId stageInProgressId);
template <typename T>
T getValue(const pxr::UsdAttribute& attribute, const pxr::UsdTimeCode& timeCode)
{
// First, look in flatcache to see if a value is present. If not, fall back
// to read USD's composed attribute value.
{
// read from flatcache via StageInProgress, this is called during a run
        // loop where extensions are modifying one timeslice within StageWithHistory
// Look up the long int identifier for the attribute's UsdStage
auto usdStageId = PXR_NS::UsdUtilsStageCache::Get().GetId(attribute.GetStage()).ToLongInt();
// grab the carb interface for StageInProgress and use it to access the
// (potentially NULL) current stageInProgress for the UsdStage
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
auto stageInProgress = iStageInProgress->get(usdStageId);
if (stageInProgress.id)
{
// Grab a pointer to in-memory representation for the attribute value, in this
// case a pointer to a T. Will be NULL if attribute doesn't exist in flatcache
auto valueSpan =
iStageInProgress->getAttribute(stageInProgress, carb::flatcache::asInt(attribute.GetPrimPath()),
carb::flatcache::asInt(attribute.GetName()));
T* valuePtr = (T*)valueSpan.ptr;
if (valuePtr)
{
// We have a value stored for this attribute in flatcache, return it
return *valuePtr;
}
}
}
// If we get here we didn't find a value stored for this attribute in flatcache,
// so call USD API
pxr::VtValue val;
attribute.Get(&val, timeCode);
return val.UncheckedGet<T>();
}
template <typename T_VALUETYPE>
void setFlatCacheValue(const pxr::UsdAttribute& attribute, T_VALUETYPE value, bool writeToUSD)
{
if (writeToUSD)
{
// write to the USD layer
attribute.Set(value);
}
else
{
// write to flatcache, via StageInProgress
// grab const references to the path of the attribute's parent
// prim and the name of the attribute. Avoid copies here.
const pxr::SdfPath& path = attribute.GetPrimPath();
const pxr::TfToken& name = attribute.GetName();
const pxr::SdfPath& attrPath = attribute.GetPath();
// Convert the bits into a carb-safe value
auto pathId = carb::flatcache::asInt(path);
auto nameId = carb::flatcache::asInt(name);
        // Look up the long int identifier for the attribute's UsdStage
auto usdStageId = carb::flatcache::UsdStageId{
(uint64_t)PXR_NS::UsdUtilsStageCache::Get().GetId(attribute.GetStage()).ToLongInt()
};
// grab the carb interface for StageInProgress and use it to access the
// (potentially NULL) current stageInProgress for the UsdStage
auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
auto stageInProgress = iStageInProgress->get(usdStageId);
if (!stageInProgress.id)
{
// No one created a stageInProgress, we're expecting this
// to be created by another extension or run loop
//
            // XXX: warn, or return false?
return;
}
// Grab a pointer to in-memory representation for the attribute value, in this
// case a pointer to a float
auto valuePtr = iStageInProgress->getAttribute(stageInProgress, pathId, nameId);
// Set the value within stageInProgress
((T_VALUETYPE*)valuePtr.ptr)[0] = value;
}
}
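// --- Usage sketch (illustrative; "prim" is a valid pxr::UsdPrim) ---
//     pxr::UsdAttribute attr = prim.GetAttribute(pxr::TfToken("radius"));
//     float r = getValue<float>(attr, pxr::UsdTimeCode::Default());
//     setFlatCacheValue<float>(attr, r * 2.0f, /*writeToUSD=*/false); // write only to flatcache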
// This should be in UsdValueAccessors.cpp, but when it goes there
// clients in DriveSim can't find the symbol. Needs fixing.
inline void setFlatCacheValueFloat(const pxr::UsdAttribute& attribute, float value, bool writeToUSD)
{
setFlatCacheValue<float>(attribute, value, writeToUSD);
}
}
}
omniverse-code/kit/fabric/include/carb/flatcache/IFlatcache.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "IPath.h"
#include <carb/Interface.h>
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IdTypes.h>
#include <carb/flatcache/Ordered_Set.h>
#include <carb/flatcache/RationalTime.h>
#include <map>
#include <stdint.h>
namespace carb
{
namespace flatcache
{
struct UsdStageId
{
uint64_t id;
constexpr bool operator<(const UsdStageId& other) const
{
return id < other.id;
}
constexpr bool operator==(const UsdStageId& other) const
{
return id == other.id;
}
constexpr bool operator!=(const UsdStageId& other) const
{
return id != other.id;
}
};
static_assert(std::is_standard_layout<UsdStageId>::value,
"Struct must be standard layout as it is used in C-ABI interfaces");
static constexpr UsdStageId kUninitializedStage{ 0 };
}
}
namespace std
{
template <>
class hash<carb::flatcache::UsdStageId>
{
public:
size_t operator()(const carb::flatcache::UsdStageId& key) const
{
return key.id;
}
};
}
namespace carb
{
namespace flatcache
{
struct BucketId
{
uint64_t id;
constexpr bool operator<(const BucketId& other) const
{
return id < other.id;
}
constexpr bool operator<=(const BucketId& other) const
{
return id <= other.id;
}
constexpr bool operator==(const BucketId& other) const
{
return id == other.id;
}
constexpr bool operator!=(const BucketId& other) const
{
return id != other.id;
}
constexpr BucketId& operator++()
{
++id;
return *this;
}
constexpr BucketId& operator--()
{
--id;
return *this;
}
constexpr explicit operator size_t() const
{
return id;
}
};
static_assert(std::is_standard_layout<BucketId>::value,
"Struct must be standard layout as it is used in C-ABI interfaces");
static constexpr BucketId kInvalidBucketId{ 0xffff'ffff'ffff'ffff };
// A struct that represents a subset of a bucket
struct BucketSubset
{
BucketId bucket; // The target bucket from which we define a subset
    set<TokenC>* attributes; // The subset of attributes to consider; only used when allAttributes == false, in which case it MUST be set
    set<PathC>* paths; // The subset of paths to consider; only used when allPaths == false, in which case it MUST be set
bool allAttributes; //attribute filtering or not
bool allPaths; //path filtering or not
};
static_assert(std::is_standard_layout<BucketSubset>::value,
"BucketSubset must be standard layout as it is used in C-ABI interfaces");
}
}
namespace std
{
template <>
class hash<carb::flatcache::BucketId>
{
public:
size_t operator()(const carb::flatcache::BucketId& key) const
{
return key.id;
}
};
}
namespace carb
{
namespace flatcache
{
// Flatcache stores data in untyped (byte) arrays.
// For conversion back to typed arrays, getArraySpan methods return the
// element size in bytes. They also return elementCount to allow the caller to
// wrap the array in std::span, or bounds check array access themselves.
// Flatcache methods can't return std::span or gsl::span directly, because they
// are not C-ABI compatible. So we define SpanC/ConstSpanC, which are.
struct ConstSpanC
{
const uint8_t* ptr;
size_t elementCount;
size_t elementSize;
};
struct SpanC
{
uint8_t* ptr;
size_t elementCount;
size_t elementSize;
// Casting SpanC to ConstSpanC is allowed, but not vice versa
operator ConstSpanC() const
{
return { ptr, elementCount, elementSize };
}
};
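// --- Usage sketch (illustrative, not part of the original header) ---
// A caller that knows the element type can wrap a SpanC back into a typed span:
//
//     inline gsl::span<float> asFloatSpan(SpanC s)
//     {
//         CARB_ASSERT(s.elementSize == sizeof(float));
//         return { reinterpret_cast<float*>(s.ptr), s.elementCount };
//     }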
struct ConstSpanWithTypeC
{
const uint8_t* ptr;
size_t elementCount;
size_t elementSize;
TypeC type;
};
struct SpanWithTypeC
{
uint8_t* ptr;
size_t elementCount;
size_t elementSize;
TypeC type;
// Casting SpanWithTypeC to ConstSpanWithTypeC is allowed, but not vice versa
operator ConstSpanWithTypeC() const
{
return { ptr, elementCount, elementSize, type };
}
};
struct SpanSizeC
{
size_t* ptr;
size_t elementCount;
};
struct ConstSpanSizeC
{
const size_t* ptr;
size_t elementCount;
};
// An ArrayPointersAndSizesC is an array of immutably sized mutable
// data arrays
//
// Rules (enforced by const):
// {
// ArrayPointersAndSizesC ps;
//
// // Allowed: Changing inner array values
// ps.arrayPtrs[0][0] = 1
//
// // Disallowed: Changing array pointers
// ps.arrayPtrs[0] = (uint8_t*)p;
//
// // Disallowed: Changing inner array sizes
// ps.sizes[0] = 1;
// }
struct ArrayPointersAndSizesC
{
uint8_t* const* arrayPtrs;
const size_t* sizes;
const size_t elementCount;
};
// A ConstArrayPointersAndSizesC is an array of immutably sized immutable
// data arrays
//
// Rules (enforced by const):
// {
// ConstArrayPointersAndSizesC ps;
//
// // Disallowed: Changing inner array values
// ps.arrayPtrs[0][0] = 1
//
// // Disallowed: Changing array pointers
// ps.arrayPtrs[0] = (uint8_t*)p;
//
// // Disallowed: Changing inner array sizes
// ps.sizes[0] = 1;
// }
struct ConstArrayPointersAndSizesC
{
const uint8_t* const* arrayPtrs;
const size_t* sizes;
size_t elementCount;
};
static_assert(std::is_standard_layout<Path>::value, "Path must be standard layout as it is used in C-ABI interfaces");
struct ConstPathCSpan
{
const Path* ptr;
size_t elementCount;
};
struct ConstAttrNameAndTypeSpanC
{
const AttrNameAndType* ptr;
size_t elementCount;
};
struct ConstChangedIndicesC
{
bool allIndicesChanged;
ConstSpanSizeC changedIndices;
};
struct ConstChangedIndicesSpanC
{
const ConstChangedIndicesC* ptr;
size_t elementCount;
};
struct BucketChangesC
{
// Which attributes changed
flatcache::ConstAttrNameAndTypeSpanC changedAttributes;
// For each attribute, which prims changed?
flatcache::ConstChangedIndicesSpanC changedIndices;
flatcache::ConstPathCSpan pathArray;
};
struct AddedPrimIndicesC
{
// Which prims were added?
flatcache::ConstSpanSizeC addedIndices;
};
struct StageWithHistorySnapshot
{
bool valid;
size_t id;
};
enum class ValidMirrors
{
eNone = 0,
eCPU = 1,
eCudaGPU = 2,
eGfxGPU = 4
};
constexpr enum ValidMirrors operator|(const enum ValidMirrors a, const enum ValidMirrors b)
{
return (enum ValidMirrors)(uint32_t(a) | uint32_t(b));
}
constexpr enum ValidMirrors operator&(const enum ValidMirrors a, const enum ValidMirrors b)
{
return (enum ValidMirrors)(uint32_t(a) & uint32_t(b));
}
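// Usage sketch: ValidMirrors is a bitmask, so individual mirrors are tested by
// masking with operator& and comparing against eNone.
//
//   ValidMirrors valid = iStageInProgress->getAttributeValidBits(stageId, path, attrName);
//   const bool cpuIsValid = (valid & ValidMirrors::eCPU) != ValidMirrors::eNone;
//   const bool cudaIsValid = (valid & ValidMirrors::eCudaGPU) != ValidMirrors::eNone;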
using PrimBucket = carb::flatcache::set<AttrNameAndType>;
//
// Note: when extending the interface, please add new functions at the end so
// that dependents don't break as easily before they are rebuilt
//
struct IStageInProgress
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageInProgress", 0, 2);
StageInProgressId(CARB_ABI* create)(UsdStageId usdStageId, size_t simFrameNumber);
StageInProgressId(CARB_ABI* get)(UsdStageId usdStageId);
void(CARB_ABI* destroy)(UsdStageId usdStageId);
size_t(CARB_ABI* getFrameNumber)(StageInProgressId stageId);
// Prefetch prim from USD stage
// This guarantees that subsequent gets of the prim from the cache will succeed
void(CARB_ABI* prefetchPrim)(UsdStageId usdStageId, PathC path);
// Get attribute for read/write access
SpanC(CARB_ABI* getAttribute)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get attribute for read only access
ConstSpanC(CARB_ABI* getAttributeRd)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get attribute for write only access
SpanC(CARB_ABI* getAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get attribute for write only access, creating it if necessary
SpanC(CARB_ABI* getOrCreateAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC typeC);
size_t(CARB_ABI* getArrayAttributeSize)(StageInProgressId stageId, PathC path, TokenC attrName);
void(CARB_ABI* setArrayAttributeSize)(StageInProgressId stageId, PathC path, TokenC attrName, size_t elemCount);
SpanC(CARB_ABI* setArrayAttributeSizeAndGet)(StageInProgressId stageId, PrimBucketListId primBucketList,
size_t primBucketListIndex, size_t indexInBucket, TokenC attrName, size_t newElemCount);
// Get an attribute's type
Type(CARB_ABI* getType)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get prim's attribute count
size_t(CARB_ABI* getAttributeCount)(StageInProgressId stageId, PathC path);
// Get the names of a prim's attributes
void(CARB_ABI* getAttributeNamesAndTypes)(Token* outNames,
Type* outTypes,
size_t outCount,
StageInProgressId stageInProgressId, PathC path);
// Attribute/prim create/destroy
void(CARB_ABI* createPrim)(StageInProgressId stageId, PathC path);
void(CARB_ABI* destroyPrim)(StageInProgressId stageId, PathC path);
void(CARB_ABI* createAttribute)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC type);
void(CARB_ABI* createAttributes)(
StageInProgressId stageId, PathC path, TokenC* attrNames, TypeC* types, uint32_t attrNameAndTypeCount);
    // Deprecated: the attribute type parameter is not required.
    void(CARB_ABI* destroyAttribute)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC type);
    // See the newer destroyAttribute2 and destroyAttributes functions later in this interface.
// Attribute SOA accessors
PrimBucketListId(CARB_ABI* findPrims)(StageInProgressId stageInProgressId,
const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none);
void(CARB_ABI* getAttributeArray)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayRd)(ConstSpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayWr)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getOrCreateAttributeArrayWr)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName,
TypeC typeC);
size_t(CARB_ABI* getBucketPrimCount)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
size_t(CARB_ABI* getBucketAttributeCount)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex);
void(CARB_ABI* getBucketAttributeNamesAndTypes)(AttrNameAndType* out, size_t outCount,
StageInProgressId stageInProgressId, PrimBucketListId primBucketList,
size_t primBucketListIndex);
ConstSpanSizeC(CARB_ABI* getArrayAttributeSizeArrayRd)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
ArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizes)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
ConstArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizesRd)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
ArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizesWr)(StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getPathArray)(ConstPathCSpan* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex);
void(CARB_ABI* printBucketNames)(StageInProgressId stageInProgressId);
void(CARB_ABI* createForAllStages)(size_t simFrameNumber);
void(CARB_ABI* destroyForAllStages)();
void(CARB_ABI* logAttributeWriteForNotice)(StageInProgressId stageId, PathC path, TokenC attrName);
// Broadcast a USD TfNotice to all registered listeners containing paths of
// all attributes passed to logAttributeWriteForNotice since this StageInProgress was constructed.
// This is used, for example, to send changes to PhysX.
void(CARB_ABI* broadcastTfNoticeForAttributesChanged)(StageInProgressId stageInProgressId);
PrimBucketListId(CARB_ABI* getChanges)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* popChanges)(StageInProgressId stageInProgressId, ListenerId listenerId);
RationalTime(CARB_ABI* getFrameTime)(StageInProgressId stageId);
/** @brief Get a Span with a pointer to the head of the relevant array of data
* with elementCount and elementSize reflecting the underlying data
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @name[in] Name of the array attribute
*
     * @return If the prim/attribute is valid and holds an array, returns a valid span;
     * otherwise returns an empty span.
*
*/
SpanC(CARB_ABI* getArrayAttribute)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Get a const Span with a pointer to the head of the relevant array of data
* with elementCount and elementSize reflecting the underlying data
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @name[in] Name of the array attribute
*
     * @return If the prim/attribute is valid and holds an array, returns a valid span;
     * otherwise returns an empty span.
*
*/
ConstSpanC(CARB_ABI* getArrayAttributeRd)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Get a Span with a pointer to the head of the relevant array of data
* with elementCount and elementSize reflecting the underlying data
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @name[in] Name of the array attribute
*
     * @return If the prim/attribute is valid and holds an array, returns a valid span;
     * otherwise returns an empty span.
*
*/
SpanC(CARB_ABI* getArrayAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Destroy attribute with matching name
*
     * Overloads and supersedes destroyAttribute, which takes an unnecessary attribute type.
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
* @attrNames[in] Attribute name
*
*/
void(CARB_ABI* destroyAttribute2)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Destroy all attributes with matching names
*
* @stageId[in] Id for the stage to look in
* @path[in] Path to the prim holding the attribute
     * @attrNames[in] Attribute name array
     * @attrNameCount[in] Number of attribute names in the array
*
*/
void(CARB_ABI* destroyAttributes)(StageInProgressId stageId, PathC path, TokenC* attrNames, uint32_t attrNameCount);
void(CARB_ABI* getAttributeArrayGpu)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayRdGpu)(ConstSpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getAttributeArrayWrGpu)(SpanC* out,
StageInProgressId stageInProgressId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
// Get GPU attribute for read/write access
SpanC(CARB_ABI* getAttributeGpu)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get GPU attribute for read only access
ConstSpanC(CARB_ABI* getAttributeRdGpu)(StageInProgressId stageId, PathC path, TokenC attrName);
// Get GPU attribute for write only access
SpanC(CARB_ABI* getAttributeWrGpu)(StageInProgressId stageId, PathC path, TokenC attrName);
/** @brief Returns which mirrors of the array are valid: CPU, GPU, etc.
*
* @stageId[in] The stage to query validity from
* @path[in] The prim path
* @attrName[in] The attribute name
*
* @return ValidMirrors struct
*
*/
ValidMirrors(CARB_ABI* getAttributeValidBits)(StageInProgressId stageId, const PathC& path, const TokenC& attrName);
// Connection API
/**
* @brief Create a connection on a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to create the connection on
* @param[in] connectionName the name of the connection attribute
* @param[in] connection the target prim and attribute for the connection
*/
void(CARB_ABI* createConnection)(StageInProgressId stageId, PathC path, TokenC connectionName, Connection connection);
/**
* @brief Create multiple connections on a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to create the connection on
* @param[in] connectionNames the name of the connection attributes to create
* @param[in] connection the target prim and attribute for the connections
* @param[in] connectionCount the number of connections to be created.
*/
void(CARB_ABI* createConnections)(StageInProgressId stageId, PathC path, const TokenC* connectionNames, const Connection* connections, size_t connectionCount);
/**
* @brief remove a connection on a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to remove the connection from
* @param[in] connectionName the name of the connection attribute
*/
void(CARB_ABI* destroyConnection)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Remove multiple connections from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to remove the connections from
* @param[in] connectionNames the name of the connection attributes to be removed
* @param[in] connectionCount the number of connections to be removed.
*/
void(CARB_ABI* destroyConnections)(StageInProgressId stageId, PathC path, const TokenC* connectionNames, size_t connectionCount);
/**
* @brief Retrieves a connection attribute from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to fetch the connection from
* @param[in] connectionName the name of the connection attribute to fetch
* @return a read/write pointer to the connection
*/
Connection*(CARB_ABI* getConnection)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Retrieves a connection attribute from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to fetch the connection from
* @param[in] connectionName the name of the connection attribute to fetch
* @return a read only pointer to the connection
*/
const Connection*(CARB_ABI* getConnectionRd)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Retrieves a connection attribute from a prim
*
* @param[in] stageId the stage to work on
* @param[in] path the prim to fetch the connection from
* @param[in] connectionName the name of the connection attribute to fetch
* @return a write only pointer to the connection
*/
Connection*(CARB_ABI* getConnectionWr)(StageInProgressId stageId, PathC path, TokenC connectionName);
/**
* @brief Copy all attributes from the source prim to the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
*
* @param[in] stageId the stage id to use for copying
* @param[in] srcPath the source prim
* @param[in] dstPath the destination prim
*/
void(CARB_ABI* copyAllAttributes)(StageInProgressId stageId, PathC srcPath, PathC dstPath);
/**
* @brief Copy the specified attributes from the source prim to the the specified
* attributes on the destination prim
* Will create attributes if they do not exist on the destination prim
* If an attribute exists on both prims they must have compatible types to copy.
* Note: The srcAttrs and dstAttrs must be the same size as the function assumes
* that the copy is 1 to 1 in terms of name alignment
*
* @param[in] stageId the stage id to use for copying
* @param[in] srcPath the source prim
* @param[in] srcAttrs a vector of attributes to be copied.
* @param[in] dstPath the destination prim
* @param[in] dstAttrs a vector of attributes to be copied.
* @param[in] count the number of attributes to copy
*/
void(CARB_ABI* copySpecifiedAttributes)(StageInProgressId stageId, PathC srcPath, const TokenC* srcAttrs, PathC dstPath, const TokenC* dstAttrs, size_t count);
};
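// Usage sketch of the SOA accessors above (hedged: interface acquisition via
// the Carbonite framework, the set contents, and `positionsToken` are
// assumptions of the example, not guarantees of this header):
//
//   auto* stage = carb::getFramework()->acquireInterface<carb::flatcache::IStageInProgress>();
//   auto* buckets = carb::getFramework()->acquireInterface<carb::flatcache::IPrimBucketList>();
//   carb::flatcache::set<AttrNameAndType> all = /* attributes every prim must have */;
//   carb::flatcache::set<AttrNameAndType> any, none;
//   PrimBucketListId found = stage->findPrims(stageInProgressId, all, any, none);
//   const size_t bucketCount = buckets->getBucketCount(found);
//   for (size_t i = 0; i != bucketCount; ++i)
//   {
//       ConstSpanC positions;
//       stage->getAttributeArrayRd(&positions, stageInProgressId, found, i, positionsToken);
//       // positions.ptr addresses positions.elementCount values of elementSize bytes each
//   }
//   buckets->destroy(found);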
struct IStageAtTimeInterval
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageAtTimeInterval", 0, 1);
StageAtTimeIntervalId(CARB_ABI* create)(StageWithHistoryId stageWithHistoryId,
RationalTime beginTime,
RationalTime endTime,
bool includeEndTime);
void(CARB_ABI* destroy)(StageAtTimeIntervalId stageAtTimeIntervalId);
size_t(CARB_ABI* getTimesampleCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
void(CARB_ABI* getTimestamps)(RationalTime* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId);
// Single attribute accessor
size_t(CARB_ABI* getAttributeRd)(const void** out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PathC path,
TokenC attrName);
// Attribute SOA accessors
PrimBucketListId(CARB_ABI* findPrims)(StageAtTimeIntervalId stageAtTimeIntervalId,
const carb::flatcache::set<AttrNameAndType>& all,
const carb::flatcache::set<AttrNameAndType>& any,
const carb::flatcache::set<AttrNameAndType>& none);
void(CARB_ABI* getAttributeArrayRd)(ConstSpanC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getArrayAttributeArrayWithSizesRd)(ConstArrayPointersAndSizesC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
void(CARB_ABI* getPathArray)(ConstPathCSpan* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex);
void(CARB_ABI* printBucketNames)(StageAtTimeIntervalId stageAtTimeIntervalId);
void(CARB_ABI* exportUsd)(StageAtTimeIntervalId stageAtTimeIntervalId, UsdStageId usdStageId);
RationalTime(CARB_ABI* getSimPeriod)(UsdStageId usdStageId);
void(CARB_ABI* getAttributeCounts)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesamples,
size_t* outCounts);
void(CARB_ABI* getAttributeNamesAndTypes)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesamples,
const size_t* attributeCounts,
Token** outNames,
Type** outTypes);
size_t(CARB_ABI* getAttributeCountForTimesample)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesampleIndex);
void(CARB_ABI* getAttributeNamesAndTypesForTimesample)(StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
size_t timesampleIndex,
size_t attributeCount,
Token* outNames,
Type* outTypes);
void(CARB_ABI* getArrayAttributeWithSizeRd)(ConstSpanWithTypeC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
carb::flatcache::PathC path,
carb::flatcache::TokenC attrName);
/** @brief Write a cache file to disk at a specified location
*
* @note Many parameters to this function are optional
* @note This currently only writes the first time in the interval
* @stageAtTimeIntervalId[in] The stage at time to be written to disk
     * @file[in] The location to write the file to
* @workingBuffer[in] [Optional] In order to avoid costly reallocations
* the code will attempt to re-use the memory at the buffer
     *                          location if it is large enough. If the buffer isn't large
     *                          enough, the cost of allocation and re-traversal may be paid
     * @workingBufferSize[in] [Optional] If workingBuffer is non-null, then this describes the length
* of the buffer
* @return The amount of data needed to serialize the cache, a return value of 0 indicates an error
*
*/
uint64_t(CARB_ABI* writeCacheToDisk)(
StageAtTimeIntervalId stageAtTimeIntervalId,
const char* file,
uint8_t* workingBuffer,
uint64_t workingBufferSize);
    /** @brief Add a ref count to any data backed by the StageAtTimeInterval
*
* @note The ref count will not enforce any behavior currently, but will
* print a warning if backing data is deleted before all ref counts
* are cleared
*
* @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting
*
* @return None
*
*/
void(CARB_ABI* addRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
/** @brief Remove a ref count from an existing timeInterval
*
*
* @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting
*
* @return True if ref count was removed successfully, failure conditions may
* include
* (1) StageAtTimeInterval doesn't exist
* (2) RefCount was already 0
*
*/
bool(CARB_ABI* removeRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
/** @brief Query ref count for a stage at time
*
     * @note A stage at time might be represented by multiple actual data sources;
     *       in that case we return the largest refcount of all the data sources
*
* @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting
*
* @return number of reference counts
*
*/
unsigned int(CARB_ABI* getRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId);
// Access GPU Array attribute
void(CARB_ABI* getAttributeArrayRdGpu)(ConstSpanC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PrimBucketListId primBucketList,
size_t primBucketListIndex,
TokenC attrName);
// Access GPU pointer attribute
void(CARB_ABI* getAttributeRdGpu)(ConstSpanC* out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PathC path,
TokenC attrName);
// Get array size, useful for GPU attributes
size_t(CARB_ABI* getArrayAttributeSize)(size_t* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId, PathC path, TokenC attrName);
/** @brief Returns which mirrors of the array are valid: CPU, GPU, etc.
*
* @stageAtTimeIntervalId[in] The stage to query validity from
* @path[in] The prim path
* @attrName[in] The attribute name
*
* @return ValidMirrors struct
*
*/
ValidMirrors(CARB_ABI* getAttributeValidBits)(StageAtTimeIntervalId stageAtTimeIntervalId, const PathC& path, const TokenC& attrName);
    /**
     * @brief Get read-only access to a connection attribute at each timesample in the interval
     */
void(CARB_ABI* getConnectionRd)(const void** out,
size_t outCount,
StageAtTimeIntervalId stageAtTimeIntervalId,
PathC path,
TokenC connectionName);
};
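// Usage sketch (hedged: the stage-with-history id, time window, and attribute
// names are assumptions of the example): reading one attribute across every
// timesample captured in an interval.
//
//   auto* history = carb::getFramework()->acquireInterface<carb::flatcache::IStageAtTimeInterval>();
//   StageAtTimeIntervalId interval = history->create(stageWithHistoryId, beginTime, endTime, true);
//   const size_t sampleCount = history->getTimesampleCount(interval);
//   std::vector<const void*> samples(sampleCount);
//   history->getAttributeRd(samples.data(), samples.size(), interval, path, attrName);
//   // samples[i] points at the attribute's value at timesample i
//   history->destroy(interval);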
struct IStageWithHistory
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageWithHistory", 0, 1);
StageWithHistoryId(CARB_ABI* create)(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod);
StageWithHistoryId(CARB_ABI* get)(UsdStageId usdStageId);
void(CARB_ABI* destroy)(UsdStageId usdStageId);
//
    // Create a snapshot of the StageWithHistory for the usdStageId. This currently just resets
    // the stage in progress, but it should probably be extended to copy the entire ring buffer if we
    // intend to do anything other than reset to the start frame.
//
StageWithHistorySnapshot(CARB_ABI* saveSnapshot)(UsdStageId usdStageId);
bool(CARB_ABI* deleteSnapshot)(UsdStageId usdStageId, size_t snapshotId);
bool(CARB_ABI* restoreFromSnapshot)(UsdStageId usdStageId, size_t snapshotId);
RationalTime(CARB_ABI* getSimPeriod)(UsdStageId usdStageId);
// For multi-process replication. Stores the link between the stage id on the master process and the local stage id.
void(CARB_ABI* setStageIdMapping)(UsdStageId usdStageIdMaster, UsdStageId usdStageIdLocal);
ListenerId(CARB_ABI* createListener)();
/** @brief Get the last frame that was written to the StageWithHistory
*
     * @usdStageId[in] The identifier for the stage
     *
     * @return The time and period of the last valid data written to the StageWithHistory
*
*/
RationalTime(CARB_ABI* getLatestFrame)(UsdStageId usdStageId);
StageWithHistoryId(CARB_ABI* create2)(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda);
UsdStageId(CARB_ABI* getLocalStageId)(UsdStageId usdStageIdMaster);
};
struct IStageWithHistoryDefaults
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IStageWithHistoryDefaults", 0, 1);
void(CARB_ABI* setStageHistoryFrameCount)(size_t historyFrameCount);
void(CARB_ABI* setStageHistoryUpdatePeriod)(uint64_t periodNumerator, uint64_t periodDenominator);
};
struct IPrimBucketList
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IPrimBucketList", 0, 2);
void(CARB_ABI* destroy)(PrimBucketListId primBucketListId);
size_t(CARB_ABI* getBucketCount)(PrimBucketListId primBucketListId);
void(CARB_ABI* print)(PrimBucketListId primBucketListId);
BucketChangesC(CARB_ABI* getChanges)(PrimBucketListId changeListId, size_t index);
AddedPrimIndicesC(CARB_ABI* getAddedPrims)(PrimBucketListId changeListId, size_t index);
};
struct IChangeTrackerConfig
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IChangeTrackerConfig", 0, 3);
void(CARB_ABI* pause)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* resume)(StageInProgressId stageInProgressId, ListenerId listenerId);
bool(CARB_ABI* isChangeTrackingPaused)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* attributeEnable)(StageInProgressId stageInProgressId, TokenC attrName, ListenerId listenerId);
void(CARB_ABI* attributeDisable)(StageInProgressId stageInProgressId, TokenC attrName, ListenerId listenerId);
bool(CARB_ABI* isListenerAttached)(StageInProgressId stageInProgressId, ListenerId listenerId);
void(CARB_ABI* detachListener)(StageInProgressId stageInProgressId, ListenerId listenerId);
size_t(CARB_ABI* getListenerCount)(StageInProgressId stageInProgressId);
void(CARB_ABI* enablePrimCreateTracking)(StageInProgressId stageInProgressId, ListenerId listenerId);
};
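// Usage sketch tying the change-tracking interfaces together (hedged: the
// interface pointers and `positionsToken` are assumptions of the example):
//
//   ListenerId listener = iStageWithHistory->createListener();
//   iChangeTrackerConfig->attributeEnable(stageInProgressId, positionsToken, listener);
//   // ... the frame runs and attributes are written ...
//   PrimBucketListId changed = iStageInProgress->getChanges(stageInProgressId, listener);
//   const size_t bucketCount = iPrimBucketList->getBucketCount(changed);
//   for (size_t i = 0; i != bucketCount; ++i)
//   {
//       BucketChangesC changes = iPrimBucketList->getChanges(changed, i);
//       // changes.changedAttributes and changes.changedIndices describe what was written
//   }
//   iStageInProgress->popChanges(stageInProgressId, listener); // consume the change log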
/** @brief The Serializer interface provides the C-ABI compatible functions for
* working with all serialization of SWH and workflows. This covers
* (1) In-memory serialization/deserialization
* (2) Serialization to Disk and From
* (3) Functions to support replication based on serialization
* Because of the nature of SWH there are multiple places one might want to
* actually serialize the cache from, we provide some convenience functions
* that wrap this up, but also the direct functionality to serialize a
* PathToAttributesMap directly to/from a buffer for convenience.
*
*/
struct ISerializer
{
CARB_PLUGIN_INTERFACE("carb::flatcache::ISerializer", 0, 2);
//
    // Deprecated in favor of the more appropriately named serializeRingBuffer
//
uint64_t(CARB_ABI* serializeStage)(StageWithHistoryId stageWithHistoryId, size_t slot, uint8_t* dest, size_t destSize);
//
    // Deprecated in favor of the more appropriately named deserializeIntoRingBuffer
//
bool (CARB_ABI* deserializeStage)(StageWithHistoryId stageWithHistoryId, size_t slot, const uint8_t* input, const size_t inputSize,
size_t simFrameNumber, carb::flatcache::RationalTime simFrameTime);
/** @brief Attempt to serialize the stage into the provided buffer. This function
* is intended to be used when you want to serialize all the data within a
     * ring buffer entry; however, this is often more data than needs to be sent.
*
* @stage[in] The StageWithHistory with the ring buffer to be serialized
* @slot[in] The slot from the ring buffer to send
     * @dest[in/out] Pointer to the buffer to be written to; writing starts at the head
     *        of the buffer, and dest is left pointing just past the last byte written
* @destSize Size of buffer that was allocated for the data (in bytes)
*
     * @return Number of bytes written; success is determined by (return <= @destSize)
*
*
     * @invariant It is safe to write to any memory within [dest, dest + destSize] for the
* duration of the function call.
*
* @note If the cache will not fit into the size of memory allocated in
* @dest then it will stop writing, but continue to run the serialize
* algorithm to calculate the actual amount of data that needs to be
* written
*
*/
uint64_t(CARB_ABI* serializeRingBuffer)(StageWithHistoryId stageWithHistoryId, size_t slot, uint8_t* dest, size_t destSize);
/** @brief Given a buffer that has the serialized version of a cache written
     * using the serialize function, this function will overwrite all the data
     * in the ring buffer at the requested slot with the data encoded in the
* buffer. This function will only succeed if the StageWithHistory that
* is passed in was created from the same UsdStage (opened at the same root layer)
* that was used to create the original serialized cache.
*
*
* @stageWithHistoryId[in] The stage to write the data to
     * @slot[in] The index in the ring buffer to write into
* @input[in] Pointer to buffer of data containing serialized cache
* @inputSize[in] Size of data in the buffer
* @simFrameNumber[in] The frame of the simulation to set the ring buffer entry to
* @simFrameTime[in] The simFrame time to set the ring buffer to
*
* @return True if buffer was successfully de-serialized
*
     * @TODO: should we care that it came from the same version of the USD file?
*/
bool (CARB_ABI* deserializeIntoRingBuffer)(StageWithHistoryId stageWithHistoryId, size_t slot, const uint8_t* input, const size_t inputSize,
size_t simFrameNumber, carb::flatcache::RationalTime simFrameTime);
/** @brief Replicate the ring buffers from the master to the workers when running
* multiple processes. Data is serialized into buffers allocated and broadcast
* by Realm, followed by deserialization into the remote ring buffers. This
* function is synchronous, i.e., the remote FlatCaches have finished updating
* when this function returns.
*/
void (CARB_ABI* replicateRingBuffers)();
};
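// Usage sketch of the two-pass pattern that serializeRingBuffer's @note
// describes: attempt serialization into an existing buffer, then grow and
// retry if the first pass reports a larger requirement (the buffer management
// here is illustrative, not prescribed by the interface).
//
//   std::vector<uint8_t> buffer(1024 * 1024);
//   uint64_t needed = iSerializer->serializeRingBuffer(stageWithHistoryId, slot, buffer.data(), buffer.size());
//   if (needed > buffer.size())
//   {
//       buffer.resize(needed); // the first call only measured; retry with enough room
//       needed = iSerializer->serializeRingBuffer(stageWithHistoryId, slot, buffer.data(), buffer.size());
//   }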
struct Platform;
struct IPlatform
{
CARB_PLUGIN_INTERFACE("carb::flatcache::IPlatform", 0, 1);
const Platform& (CARB_ABI* get)(const PlatformId& platformId);
Platform& (CARB_ABI* getMutable)(const PlatformId& platformId);
void (CARB_ABI* reset)(const PlatformId& platformId);
void (CARB_ABI* resetAll)();
};
} // namespace flatcache
} // namespace carb
| 41,868 | C | 40.169125 | 164 | 0.630386 |
omniverse-code/kit/fabric/include/carb/flatcache/GetArrayGPU.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "PathToAttributesMap.h"
#include <carb/profiler/Profile.h>
namespace carb
{
namespace flatcache
{
const uint64_t kProfilerMask = 1;
// If this is an array-of-arrays:
// array.cpuData - array of CPU pointers on CPU
// gpuPointerArray->cpuData() - array of GPU pointers on CPU
inline void PathToAttributesMap::enableGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* gpuPointerArray)
{
CARB_PROFILE_ZONE(kProfilerMask, "enableGpuRead");
using omni::gpucompute::MemcpyKind;
log("begin enableGpuRead\n");
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
uint8_t* cpuArray = array.cpuData();
uint8_t*& gpuArray = array.gpuArray;
if (gpuValid)
{
// Nothing to do
}
else if (cpuValid)
{
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
if (typeInfo.isArray)
{
const size_t elemCount = array.count;
uint8_t** cpuPointers = reinterpret_cast<uint8_t**>(cpuArray);
uint8_t** gpuPointers = reinterpret_cast<uint8_t**>(gpuPointerArray->cpuData());
for (size_t elem = 0; elem != elemCount; elem++)
{
const size_t desiredCapacity = elemToArraySize[elem];
const size_t cpuCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
if(gpuCapacity != desiredCapacity)
{
destructiveResizeIfNecessaryGPU(*gpuPointerArray, elem, gpuCapacity, desiredCapacity, typeInfo.arrayElemSize, platform.gpuCuda, platform.gpuCudaCtx);
}
const size_t copyByteCount = std::min(desiredCapacity, cpuCapacity) * typeInfo.arrayElemSize;
if (copyByteCount > 0)
{
void* cpuPointer = cpuPointers[elem];
void* gpuPointer = gpuPointers[elem];
CARB_ASSERT(cpuPointer);
CARB_ASSERT(gpuPointer);
platform.gpuCuda->memcpyAsync( *platform.gpuCudaCtx, gpuPointer, cpuPointer, copyByteCount, MemcpyKind::hostToDevice);
}
}
gpuPointerArray->cpuValid = true;
}
else
{
// Copy the outer array from CPU to GPU
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuCuda, platform.gpuCudaCtx);
log("array values: to GPU\n");
uint8_t* cpuArray = array.cpuData();
carb::profiler::ZoneId zoneId = CARB_PROFILE_BEGIN(kProfilerMask, "outer array values");
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, gpuArray, cpuArray, byteCount, MemcpyKind::hostToDevice);
CARB_PROFILE_END(kProfilerMask, zoneId);
}
// New state
cpuValid = true;
gpuValid = true;
gpuAllocedWithCuda = true;
}
}
inline void PathToAttributesMap::enableGpuWrite(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* arrayGpuDataArray)
{
CARB_PROFILE_ZONE(kProfilerMask, "enableGpuWrite");
using omni::gpucompute::MemcpyKind;
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (!typeInfo.isArray && !gpuValid)
{
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuCuda, platform.gpuCudaCtx);
}
else if (typeInfo.isArray)
{
// Array-valued elements are lazily allocated, meaning they are only
// resized when write access is requested.
// Write access has been requested, so resize if necessary
size_t elemCount = array.count;
for (size_t elem = 0; elem != elemCount; elem++)
{
size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
resizeIfNecessaryGPU(
*arrayGpuDataArray, elem, gpuCapacity, desiredElemCount, typeInfo.arrayElemSize, platform.gpuCuda, platform.gpuCudaCtx);
}
// Upload of allocated pointers to GPU happens outside this function
}
// New state
usdValid = false;
cpuValid = false;
gpuValid = true;
gpuAllocedWithCuda = true;
if (elemToArrayCpuCapacity) elemToArrayCpuCapacity->usdValid = false;
if (elemToArrayGpuCapacity) elemToArrayGpuCapacity->usdValid = false;
if (arrayGpuDataArray) arrayGpuDataArray->usdValid = false;
}
inline ConstSpanC PathToAttributesMap::getArraySpanRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanRdGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
// We don't set dirty indices here because this method gives read-only access
return getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix).array;
}
inline const void* PathToAttributesMap::getArrayRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
// We don't set dirty indices here because this method gives read-only access
return getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix).array.ptr;
}
inline SpanC PathToAttributesMap::getArrayGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayGpuC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline ConstSpanC PathToAttributesMap::getArrayRdGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdGpuC", apiLogEnabled, attrName);
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getArrayWrGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getArraySpanWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanWrGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix);
setArrayDirty(array);
return array.array;
}
inline void* PathToAttributesMap::getArrayWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrGpuC", apiLogEnabled, attrName);
// Get write-only GPU access
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
inline SpanC PathToAttributesMap::getArraySpanGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array;
}
inline void* PathToAttributesMap::getArrayGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayGpuC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
inline SpanC PathToAttributesMap::getAttributeGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix)
{
APILOGGER("getAttributeGpuC", apiLogEnabled, path, attrName);
bool present; // Whether this path has a bucket
BucketId bucketId; // Pointer to the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig().withPtrToPtrKind(ptrToPtrKind), suffix);
setArrayElementDirty(array, element);
return getArrayElementPtr(array.array, element);
}
inline ConstSpanC PathToAttributesMap::getAttributeRdGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix)
{
APILOGGER("getAttributeRdGpuC", apiLogEnabled, path, attrName);
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ConstSpanC array = getArraySpanC(bucketId, attrName, CudaReadConfig().withPtrToPtrKind(ptrToPtrKind), suffix).array;
// We don't set dirty indices here because this method gives read-only access
return getArrayElementPtr(array, element);
}
inline SpanC PathToAttributesMap::getAttributeWrGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix)
{
APILOGGER("getAttributeWrGpuC", apiLogEnabled, path, attrName);
// Writing an element is a RMW on the whole array, so use getArrayGpu instead of getArrayGpuWr
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig().withPtrToPtrKind(ptrToPtrKind), suffix);
setArrayElementDirty(array, element);
return getArrayElementPtr(array.array, element);
}
// Typed accessors
template <typename T>
inline const T* PathToAttributesMap::getArrayRdGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayRdGpu", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getArrayRdGpuC(bucket, attrName));
}
template <typename T>
inline T* PathToAttributesMap::getArrayWrGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayWrGpu", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayWrGpuC(bucket, attrName));
}
template <typename T>
inline T* PathToAttributesMap::getArrayGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayGpu", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayGpuC(bucket, attrName));
}
template <typename T>
inline T* PathToAttributesMap::getAttributeGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind)
{
APILOGGER("getAttributeGpu", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeGpuC(path, attrName, ptrToPtrKind).ptr);
}
template <typename T>
inline const T* PathToAttributesMap::getAttributeRdGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind)
{
APILOGGER("getAttributeRdGpu", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getAttributeRdGpuC(path, attrName, ptrToPtrKind).ptr);
}
template <typename T>
inline T* PathToAttributesMap::getAttributeWrGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind)
{
APILOGGER("getAttributeWrGpu", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeWrGpuC(path, attrName, ptrToPtrKind).ptr);
}
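// Usage sketch (hedged: the attribute token, element type, and PtrToPtrKind
// value are assumptions of the example): the typed GPU accessors return
// device pointers, which must be handed to CUDA kernels rather than
// dereferenced on the host.
//
//   PtrToPtrKind kind = /* chosen per use case; see PathToAttributesMap.h */;
//   float* devicePositions = pathToAttributesMap.getAttributeGpu<float>(path, positionsToken, kind);
//   if (devicePositions)
//   {
//       // Launch a CUDA kernel over the attribute here; reading
//       // devicePositions on the CPU would be invalid.
//   }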
}
}
| 14,162 | C | 37.909341 | 169 | 0.69856 |
omniverse-code/kit/fabric/include/carb/flatcache/AttrNameAndType.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/Type.h>
namespace carb
{
namespace flatcache
{
// Basic types
// Types and methods ending in C are intended to be used with C-ABI interfaces.
// PathToAttributesMap allows metadata to be attached to attributes.
// The metadata that exist currently are for (flatcache) internal use only.
// Abstractly it holds an array for each attribute, where element 0
// (NameSuffix::none) is the value itself, and other elements are the metadata.
// It is called NameSuffix because conceptually each metadatum adds a
// new attribute with a name suffix specifying the type of metadata.
// For example, suppose you have an attribute "points" that has an attached
// elemCount. Conceptually you have two attributes "points" and
// "points_elemCount".
enum class NameSuffix
{
none = 0, // Index NameSuffix::value is the index of the data itself
// The following metadata is present on USD attributes that connect to others
connection // The target(prim, attribute) of the connection
};
struct Connection
{
PathC path;
TokenC attrName;
};
// AttrNameAndType specifies the name and type of an attribute. When the user
// searches for buckets of prims they use this type to specify which attributes
// the prims must have. Also the user can query the name and type of an
// attribute at a given path, and the output has this type.
//
// This version of the struct contains the type in flatcache format only.
// The original version of the struct additionally contained the type in USD
// format, but that version has been deprecated (see the AttrNameAndType_v2 note below).
struct AttrNameAndType
{
Type type;
Token name;
NameSuffix suffix;
AttrNameAndType() = default;
AttrNameAndType(Type type, Token name, NameSuffix suffix = NameSuffix::none)
: type(type), name(name), suffix(suffix)
{
}
// Note that in the name comparisons below TokenC masks off USD's lifetime bit.
// For example, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
bool operator<(const AttrNameAndType& rhs) const
{
if (TypeC(type) < TypeC(rhs.type))
return true;
if (TypeC(rhs.type) < TypeC(type))
return false;
if (TokenC(name) < TokenC(rhs.name))
return true;
if (TokenC(rhs.name) < TokenC(name))
return false;
return suffix < rhs.suffix;
}
bool operator==(const AttrNameAndType& other) const
{
return type == other.type && name == other.name && suffix == other.suffix;
}
};
static_assert(std::is_standard_layout<AttrNameAndType>::value,
"AttrNameAndType must be standard layout as it is used in C-ABI interfaces");
// NOTE: This type alias provides source level compatibility. Usage of the original AttrNameAndType structure has
// been replaced with what was previously called AttrNameAndType_v2 and the _v2 suffix dropped. This alias allows code
// which still refers to AttrNameAndType_v2 to compile.
using AttrNameAndType_v2 = AttrNameAndType;
}
}
namespace std
{
template <>
struct hash<carb::flatcache::AttrNameAndType>
{
// Use the same hash_combine as boost
template <class T>
static inline void hash_combine(std::size_t& seed, const T& v)
{
std::hash<T> hasher;
seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
std::size_t operator()(const carb::flatcache::AttrNameAndType& key) const
{
size_t hash = std::hash<carb::flatcache::Type>{}(key.type);
hash_combine(hash, std::hash<carb::flatcache::Token>{}(key.name));
hash_combine(hash, uint32_t(key.suffix));
return hash;
}
};
}
| 4,247 | C | 32.984 | 118 | 0.705204 |
omniverse-code/kit/fabric/include/carb/flatcache/Platform.h | #pragma once
#include <carb/flatcache/Allocator.h>
namespace omni
{
namespace gpucompute
{
struct GpuCompute;
struct Context;
} // namespace gpucompute
} // namespace omni
namespace carb
{
namespace flatcache
{
struct Platform
{
Allocator allocator;
omni::gpucompute::GpuCompute* gpuCuda = nullptr;
omni::gpucompute::Context* gpuCudaCtx = nullptr;
// The gpuD3dVk interface is used only if you access GPU arrays using D3D or Vulkan.
// If you're only using CPU or CUDA GPU arrays then you don't set it.
omni::gpucompute::GpuCompute* gpuD3dVk = nullptr;
omni::gpucompute::Context* gpuD3dVkCtx = nullptr;
Platform() = default;
Platform(const Platform& other) = delete;
Platform& operator=(const Platform& other) = delete;
Platform(Platform&& other) = default;
Platform& operator=(Platform&& other) = default;
inline void reset()
{
gpuD3dVk = nullptr;
gpuD3dVkCtx = nullptr;
gpuCuda = nullptr;
gpuCudaCtx = nullptr;
allocator.~Allocator();
new (&allocator) Allocator();
}
// mirror of IPlatform functions
static void get(const PlatformId& id);
static void getMutable(const PlatformId& id);
static void reset(const PlatformId& id);
static void resetAll();
};
} // namespace flatcache
} // namespace carb
| 1,343 | C | 21.032787 | 88 | 0.676843 |
omniverse-code/kit/fabric/include/carb/flatcache/Type.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <sstream>
#include <string>
namespace carb
{
namespace flatcache
{
// TypeC are integer keys that identify types, like float3, int[] etc.
// There isn't a USD type that can be cast to TypeC,
// please use carb::flatcache::usdTypeToTypeC().
struct TypeC
{
uint32_t type;
constexpr bool operator<(const TypeC& other) const
{
return type < other.type;
}
constexpr bool operator==(const TypeC& other) const
{
return type == other.type;
}
constexpr bool operator!=(const TypeC& other) const
{
return type != other.type;
}
};
static_assert(std::is_standard_layout<TypeC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");
static constexpr TypeC kUnknownType{ 0 };
enum class BaseDataType : uint8_t
{
eUnknown = 0,
eBool,
eUChar,
eInt,
eUInt,
eInt64,
eUInt64,
eHalf,
eFloat,
eDouble,
eToken,
    // eRelationship is stored as a 64-bit integer internally, but shouldn't be
    // treated as an integer type by nodes.
eRelationship,
// For internal use only
eAsset,
ePrim,
eConnection,
// eTags are attributes that have a name but no type or value
// They are used for named tags, including USD applied schemas
eTag
};
inline std::ostream& operator<<(std::ostream& s, const BaseDataType& type)
{
static const std::string names[] = { "unknown", "bool", "uchar", "int", "uint",
"int64", "uint64", "half", "float", "double",
"token", "rel", "asset", "prim", "connection",
"tag" };
if (type <= BaseDataType::eTag)
{
return s << names[uint8_t(type)];
}
return s;
}
// These correspond with USD attribute "roles", with the exception of eText.
// For example, a vector3f or vector3d (eVector) would be transformed
// differently from a point3f or point3d (ePosition).
enum class AttributeRole : uint8_t
{
eNone = 0,
eVector,
eNormal,
ePosition,
eColor,
eTexCoord,
eQuaternion,
eTransform,
eFrame,
eTimeCode,
// eText is not a USD role. If a uchar[] attribute has role eText then
// the corresponding USD attribute will have type "string", and be human
// readable in USDA. If it doesn't, then it will have type "uchar[]" in USD
// and appear as an array of numbers in USDA.
eText,
// eAppliedSchema is not a USD role, eTags with this role are USD applied schema.
eAppliedSchema,
// ePrimTypeName is not a USD role, eTags with this role are USD prim types.
ePrimTypeName,
// eExecution is not a USD role, uint attributes with this role are used for control flow in Action Graphs.
eExecution,
eMatrix,
// eObjectId is not a USD role, uint64 attributes with this role are used for Python object identification.
eObjectId,
// eBundle is not a USD role, ePrim and eRelationship attributes with this role identify OmniGraph bundles
eBundle,
// ePath is not a USD role, it refers to strings that are reinterpreted as SdfPaths. The attribute type must
// be a uchar[] with a USD type "string".
ePath,
// eInstancedAttribute is used as a role on tag types in place of attribute types on instanced prims.
eInstancedAttribute,
// eAncestorPrimTypeName is not a USD role, eTags with this role are ancestor types of a USD prim type.
eAncestorPrimTypeName,
// Special marker for roles that are not yet determined
eUnknown,
};
inline std::ostream& operator<<(std::ostream& s, const AttributeRole& type)
{
static const std::string ognRoleNames[] = { "none", "vector", "normal", "point", "color",
"texcoord", "quat", "transform", "frame", "timecode",
"text", "appliedSchema", "primTypeName", "execution", "matrix",
"objectId", "bundle", "path", "instancedAttribute", "ancestorPrimTypeName",
"unknown" };
if (type <= AttributeRole::eUnknown)
{
return s << ognRoleNames[uint8_t(type)];
}
return s;
}
// Role names as used by USD, which are slightly different from the internal names used
inline std::string usdRoleName(const AttributeRole& type)
{
static const std::string usdRoleNames[] = { "none", "vector", "normal", "position", "color",
"texCoord", "quaternion", "transform", "frame", "timecode",
"text", "appliedSchema", "primTypeName", "execution", "matrix",
"objectId", "bundle", "path", "instancedAttribute", "ancestorPrimTypeName",
"unknown" };
if (type <= AttributeRole::eUnknown)
{
return usdRoleNames[uint8_t(type)];
}
return usdRoleNames[uint8_t(AttributeRole::eUnknown)];
}
struct Type
{
BaseDataType baseType; // 1 byte
// 1 for raw base types; 2 for vector2f, int2, etc; 3 for point3d, normal3f, etc;
// 4 for quatf, color4d, float4, matrix2f etc; 9 for matrix3f, etc; 16 for matrix4d, etc.
uint8_t componentCount; // 1 byte
// 0 for a single value
// 1 for an array
// 2 for an array of arrays (not yet supported)
uint8_t arrayDepth; // 1 byte
AttributeRole role; // 1 byte
constexpr Type(BaseDataType baseType, uint8_t componentCount = 1, uint8_t arrayDepth = 0, AttributeRole role = AttributeRole::eNone)
: baseType(baseType), componentCount(componentCount), arrayDepth(arrayDepth), role(role)
{
}
constexpr Type() : Type(BaseDataType::eUnknown)
{
}
// Matches little endian interpretation of the four bytes
constexpr explicit Type(const TypeC& t)
: baseType(BaseDataType(t.type & 0xff)),
componentCount((t.type >> 8) & 0xff),
arrayDepth((t.type >> 16) & 0xff),
role(AttributeRole((t.type >> 24) & 0xff))
{
}
constexpr explicit operator TypeC() const
{
uint32_t type = uint8_t(role) << 24 | arrayDepth << 16 | componentCount << 8 | uint8_t(baseType);
return TypeC{ type };
}
constexpr bool operator==(const Type& rhs) const
{
return compatibleRawData(rhs) && role == rhs.role;
}
constexpr bool operator!=(const Type& rhs) const
{
return !((*this) == rhs);
}
constexpr bool operator<(const Type& rhs) const
{
return TypeC(*this) < TypeC(rhs);
}
/**
* Role-insensitive equality check
*/
constexpr bool compatibleRawData(const Type& otherType) const
{
return baseType == otherType.baseType && componentCount == otherType.componentCount &&
arrayDepth == otherType.arrayDepth;
}
/**
* Check to see if this is one of the matrix types
*/
constexpr bool isMatrixType() const
{
return (role == AttributeRole::eMatrix) || (role == AttributeRole::eFrame) || (role == AttributeRole::eTransform);
}
/**
* Returns the dimensions of the type, componentCount for most types and square root of that for matrix types
*/
constexpr uint8_t dimension() const
{
if (isMatrixType())
{
return componentCount == 4 ? 2 : (componentCount == 9 ? 3 : (componentCount == 16 ? 4 : componentCount));
}
return componentCount;
}
std::string getTypeName() const
{
std::ostringstream typeName;
typeName << baseType;
if (componentCount > 1)
typeName << uint32_t(componentCount);
if (arrayDepth == 1)
typeName << "[]";
else if (arrayDepth == 2)
typeName << "[][]";
// Some roles are hidden from USD
if ((role != AttributeRole::eNone)
&& (role != AttributeRole::eObjectId)
&& (role != AttributeRole::eBundle)
&& (role != AttributeRole::ePath)
)
{
typeName << " (" << usdRoleName(role) << ")";
}
return typeName.str();
}
// ======================================================================
/**
* OGN formats the type names slightly differently.
* - the tuples are internal "float[3]" instead of "float3"
* - the roles replace the actual name "colord[3]" instead of "double3 (color)"
*/
std::string getOgnTypeName() const
{
std::ostringstream typeName;
if (role == AttributeRole::eText)
{
typeName << "string";
return typeName.str();
}
if (role == AttributeRole::ePath)
{
typeName << "path";
return typeName.str();
}
if (role != AttributeRole::eNone)
{
typeName << role;
// For roles with explicit types, add that to the role name
if ((role != AttributeRole::eTimeCode)
&& (role != AttributeRole::eTransform)
&& (role != AttributeRole::eFrame)
&& (role != AttributeRole::eObjectId)
&& (role != AttributeRole::eBundle)
&& (role != AttributeRole::eExecution))
{
switch (baseType)
{
case BaseDataType::eHalf:
typeName << "h";
break;
case BaseDataType::eFloat:
typeName << "f";
break;
case BaseDataType::eDouble:
typeName << "d";
break;
default:
typeName << baseType;
break;
}
}
}
else
{
typeName << baseType;
}
if (componentCount > 1)
{
typeName << "[" << uint32_t(dimension()) << "]";
}
if (arrayDepth == 1)
typeName << "[]";
else if (arrayDepth == 2)
typeName << "[][]";
return typeName.str();
}
};
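// Worked sketch of the byte packing used by the TypeC conversions above: each
// field occupies one byte, little-endian from baseType upward.
//
//   constexpr Type point3f(BaseDataType::eFloat, 3, 0, AttributeRole::ePosition);
//   static_assert(TypeC(point3f).type == 0x03000308u,
//                 "role=3 <<24 | arrayDepth=0 <<16 | componentCount=3 <<8 | baseType=8");
//   static_assert(Type(TypeC{ 0x03000308u }) == point3f, "the packing round-trips losslessly");
//   // point3f.getTypeName() yields "float3 (position)"; getOgnTypeName() yields "pointf[3]".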
inline std::ostream& operator<<(std::ostream& s, const Type& type)
{
s << type.getTypeName();
return s;
}
}
}
namespace std
{
template <>
struct hash<carb::flatcache::Type>
{
std::size_t operator()(const carb::flatcache::Type& key) const
{
return carb::flatcache::TypeC(key).type;
}
};
template <>
struct hash<carb::flatcache::TypeC>
{
std::size_t operator()(const carb::flatcache::TypeC& key) const
{
return key.type;
}
};
}
| 11,365 | C | 30.484764 | 139 | 0.560493 |
omniverse-code/kit/fabric/include/carb/flatcache/GetArrayD3dGpu.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Defines.h>
#include <carb/graphics/Graphics.h>
#include <omni/gpucompute/GpuCompute.h>
using namespace carb::graphics;
using std::unique_ptr;
namespace carb
{
namespace flatcache
{
// If this is an array-of-arrays:
// array.cpuData - array of CPU pointers on CPU
// gpuPointerArray->cpuData() - array of GPU pointers on CPU
inline void PathToAttributesMap::enableD3dGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* gpuPointerArray)
{
using omni::gpucompute::MemcpyKind;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
uint8_t* cpuArray = array.cpuData();
uint8_t*& gpuArray = array.gpuArray;
if (gpuValid)
{
// Nothing to do
}
    else if (cpuValid)
{
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
// If each element is an array, then they could be of different sizes
// So alloc and memcpy each one
if (typeInfo.isArray)
{
size_t elemCount = array.count;
uint8_t** elemToArrayCpuData = (uint8_t**)cpuArray;
uint8_t** elemToArrayGpuData = (uint8_t**)gpuPointerArray->cpuData();
for (size_t elem = 0; elem != elemCount; elem++)
{
// Make sure that the dest (GPU) buffer is large enough
const uint8_t* const& cpuData = elemToArrayCpuData[elem]; // src
size_t& destCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
destructiveResizeIfNecessaryGPU(
*gpuPointerArray, elem, destCapacity, desiredElemCount, typeInfo.arrayElemSize, platform.gpuD3dVk, platform.gpuD3dVkCtx);
// Copy from CPU to GPU
if (desiredElemCount != 0 && cpuData)
{
uint8_t*& gpuData = elemToArrayGpuData[elem]; // dest
size_t copyByteCount = desiredElemCount * typeInfo.arrayElemSize;
platform.gpuD3dVk->memcpy(*platform.gpuD3dVkCtx, gpuData, cpuData, copyByteCount, MemcpyKind::hostToDevice);
}
else if (desiredElemCount != 0 && !cpuData)
{
printf("Warning: GPU read access requested, CPU is valid but not allocated\n");
}
}
// We don't need to copy the outer array to GPU here.
// In D3dVk, the outer array is currently a CPU array of descriptors that we copy to
// a kernel calls descriptor set at dispatch time
}
else
{
// Copy the outer array from CPU to GPU
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuD3dVk, platform.gpuD3dVkCtx);
uint8_t* cpuArray = array.cpuData();
platform.gpuD3dVk->memcpy(*platform.gpuD3dVkCtx, gpuArray, cpuArray, byteCount, MemcpyKind::hostToDevice);
}
// New state
cpuValid = true;
gpuValid = true;
gpuAllocedWithCuda = false;
}
}
inline void PathToAttributesMap::enableD3dGpuWrite(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* arrayGpuDataArray)
{
log("begin enableGpuWrite\n");
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
const TypeC type = array.type;
const Typeinfo &typeInfo = getTypeInfo(type);
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (!gpuValid)
{
size_t byteCount = array.size();
allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuD3dVk, platform.gpuD3dVkCtx);
// Array-valued elements are lazily allocated, meaning they are only
// resized when write access is requested.
// Write access has been requested, so resize if necessary
if (typeInfo.isArray)
{
size_t elemCount = array.count;
for (size_t elem = 0; elem != elemCount; elem++)
{
size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
resizeIfNecessaryGPU(
*arrayGpuDataArray, elem, gpuCapacity, desiredElemCount, typeInfo.arrayElemSize, platform.gpuD3dVk, platform.gpuD3dVkCtx);
}
// Upload of allocated pointers to GPU happens outside this function
}
}
// New state
usdValid = false;
cpuValid = false;
gpuValid = true;
gpuAllocedWithCuda = false;
log("end enableGpuWrite\n\n");
}
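// Illustrative summary (added for clarity) of the valid-bit transitions
// implemented by enableD3dGpuRead/enableD3dGpuWrite above:
//
//   before            after enableD3dGpuRead    after enableD3dGpuWrite
//   cpuValid only     cpuValid + gpuValid       gpuValid only
//   gpuValid already  unchanged (no copy)       gpuValid only
//
// Whenever a transition happens, gpuAllocedWithCuda is cleared, because
// memory on this path is allocated through platform.gpuD3dVk, not CUDA.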
inline const void* PathToAttributesMap::getArrayRdD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
// We don't set dirty indices here because this method gives read-only access
return getArraySpanC(bucketId, attrName, D3dVkReadConfig(), suffix).array.ptr;
}
inline void* PathToAttributesMap::getArrayWrD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
inline void* PathToAttributesMap::getArrayD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig(), suffix);
setArrayDirty(array);
return array.array.ptr;
}
#if 0
// If array of values, return the Buffer* that was returned by malloc
// If array of arrays, return array of Buffer* for each element array
inline std::pair<void**, size_t> PathToAttributesMap::getArrayD3d(const Bucket& bucket, const TokenC& attrName)
{
std::pair<void**, size_t> retval = { nullptr, 0 };
BucketImpl& bucketImpl = buckets[bucket];
auto iter = bucketImpl.arrays.find({ pxr::TfType(), TypeC(), attrName });
bool found = (iter != bucketImpl.arrays.end());
if (found)
{
bool isTag = (typeToInfo[iter->first.type].size == 0);
if (!isTag)
{
pxr::TfType type = iter->first.type;
size_t elemSize = typeToInfo[type].size;
size_t arrayElemSize = typeToInfo[type].arrayElemSize;
// Read enable must come before write enable
enableD3dGpuRead(iter->second, elemSize, arrayElemSize);
enableD3dGpuWrite(iter->second, elemSize, arrayElemSize);
retval.first = iter->second.d3dArrays.data();
retval.second = iter->second.d3dArrays.size();
}
else if (isTag)
{
// If is a tag, then array.data() will be zero, so set special value
// to distinguish from tag absent case
retval.first = (void**)-1;
}
}
return retval;
}
#endif
inline omni::gpucompute::GpuPointer PathToAttributesMap::getAttributeD3d(const PathC& path, const TokenC& attrName)
{
bool present; // Whether this path has a bucket
BucketId bucketId; // Pointer to the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
// TODO: Get rid of double hash lookup below (getArraySpanC + explicit call)
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig());
setArrayElementDirty(arrayAndDirtyIndices, element);
void* array = arrayAndDirtyIndices.array.ptr;
if (array != nullptr)
{
// Get elemSize
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return { nullptr, 0, 0 };
const AttrName name{ attrName, NameSuffix::none };
const MirroredArray *valuesArray;
if (!bucketImplPtr->scalarAttributeArrays.find(name, &valuesArray))
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
valuesArray = &arrayAttributeArray->values;
}
else
{
return { nullptr, 0, 0 };
}
}
assert(valuesArray);
const Typeinfo &typeinfo = getTypeInfo(valuesArray->type);
const bool isArrayOfArray = typeinfo.isArray;
const size_t elemSize = typeinfo.size;
if (!isArrayOfArray)
{
return { array, element * elemSize, elemSize };
}
else if (isArrayOfArray)
{
// For arrays of arrays we return the Buffer* of the inner array
uint8_t* const* elemToArrayData = (uint8_t* const*)array;
return { elemToArrayData[element], 0, 0 };
}
}
return { nullptr, 0, 0 };
}
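// Usage sketch (illustrative; ptam, path and attrName are hypothetical):
#if 0
omni::gpucompute::GpuPointer p = ptam.getAttributeD3d(path, attrName);
// For a scalar attribute, p carries the bucket's SOA array plus the byte
// offset of this element; for an array-of-arrays attribute, p carries the
// inner array's Buffer* with zero offset, per the branches above.
#endif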
}
}
// File: omniverse-code/kit/fabric/include/carb/flatcache/PathToAttributesMap.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Defines.h>
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IdTypes.h>
#include <carb/flatcache/ApiLogger.h>
#include <carb/flatcache/HashMap.h>
#include <carb/flatcache/IFlatcache.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/IToken.h>
#include <carb/flatcache/Ordered_Set.h>
#include <carb/flatcache/Platform.h>
#include <carb/flatcache/PrimChanges.h>
#include <carb/flatcache/Type.h>
#include <carb/logging/Log.h>
#include <carb/profiler/Profile.h>
#include <carb/thread/Mutex.h>
#include <omni/gpucompute/GpuCompute.h>
#include <fstream>
// The following is needed to include USD headers
#if defined(__GNUC__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# pragma GCC diagnostic ignored "-Wunused-local-typedefs"
# pragma GCC diagnostic ignored "-Wunused-function"
// This suppresses deprecated header warnings, which is impossible with pragmas.
// Alternative is to specify -Wno-deprecated build option, but that disables other useful warnings too.
# ifdef __DEPRECATED
# define OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
# undef __DEPRECATED
# endif
#endif
#include <pxr/base/tf/type.h>
#include <pxr/usd/usd/timeCode.h>
// PathToAttributesMap doesn't depend on USD for tokens or paths
// However, it's useful to be able to see the USD text representation of tokens and
// paths when debugging. Set ENABLE_USD_DEBUGGING to 1 to enable that.
#if defined(_DEBUG)
# define ENABLE_USD_DEBUGGING 1
#else
# define ENABLE_USD_DEBUGGING 0
#endif
// TODO: Move this to some shared macro header if needed elsewhere
#if defined(_DEBUG)
#define VALIDATE_TRUE(X) CARB_ASSERT(X)
#else
#define VALIDATE_TRUE(X) X
#endif
#define PTAM_SIZE_TYPE size_t
#define PTAM_SIZE_TYPEC (flatcache::TypeC(carb::flatcache::Type(carb::flatcache::BaseDataType::eUInt64)))
static_assert(sizeof(PTAM_SIZE_TYPE) == sizeof(uint64_t), "Unexpected sizeof size_t");
#define PTAM_POINTER_TYPE void*
#define PTAM_POINTER_TYPEC (flatcache::TypeC(carb::flatcache::Type(carb::flatcache::BaseDataType::eUInt64)))
static_assert(sizeof(PTAM_POINTER_TYPE) == sizeof(uint64_t), "Unexpected sizeof void*");
// When we switch to CUDA async CPU<->GPU copies, we'll need to use pinned CPU
// memory for performance. However, the allocations themselves will be much
// slower. If you want to see how much slower, set USE_PINNED_MEMORY to 1.
// When we do switch, we should probably do a single allocation and sub
// allocate it ourselves. That way we'd only call cudaHostAlloc once.
#define USE_PINNED_MEMORY 0
// Set this to one to enable CARB profile zones for large bucket copies
#define PROFILE_LARGE_BUCKET_COPIES 0
// We plan to move TfToken and AssetPath construction to IToken.
// Until we do we have to depend on token.h, a USD header
#include "FlatCacheUSD.h"
#include <pxr/usd/sdf/pathTable.h> // 104 only - do not port this to 105+
// Enums are in their own file since they have no external dependencies
#include "Enums.h"
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/path.h>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <map>
#include <queue>
#include <set>
#include <unordered_map>
#include <utility>
using pxr::UsdTimeCode;
using Hierarchy = pxr::SdfPathTable<int>; // 104 only - do not port this to 105+
namespace carb
{
namespace flatcache
{
struct AttrName
{
TokenC name;
NameSuffix suffix;
bool operator<(const AttrName& other) const = delete;
bool operator==(const AttrName& other) const = delete;
};
// Use the same hash_combine as boost
template <class T>
static inline size_t hash_combine(std::size_t seed, const T& v)
{
std::hash<T> hasher;
seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
return seed;
}
}
}
namespace carb
{
namespace flatcache
{
inline std::string toString(NameSuffix suffix)
{
if (suffix == NameSuffix::connection)
return "_connection";
else if (suffix == NameSuffix::none)
return "";
return "";
}
inline std::ostream& operator<<(std::ostream& s, const NameSuffix& nameSuffix)
{
s << toString(nameSuffix);
return s;
}
// FlatCache buckets UsdPrims according to their type and the UsdAttributes
// they have. For example, all the UsdGeomMeshes can go in one bucket, all the
// UsdSkelAnimations in another. The user can then quickly get a contiguous
// array of all the meshes, without having to traverse the whole stage.
//
// The name of a bucket is its set of attribute names (Tokens).
// As in the USD API, these tokens are the names used in USDA files, not C++
// type names like UsdGeomMesh.
//
// Example (using C++11 initializer lists to create the sets):
// Bucket meshes = { Token("Mesh") };
// Bucket skelAnimations = { Token("SkelAnimation") };
//
// For efficiency, set is an ordered c++ array, not std::set
using Bucket = set<AttrNameAndType>;
struct BucketAndId
{
const Bucket bucket;
const BucketId bucketId;
};
// Buckets store attribute values in contiguous arrays, and in C++ array
// indices are size_t
using ArrayIndex = size_t;
const ArrayIndex kInvalidArrayIndex = 0xffff'ffff'ffff'ffff;
// Invariants:
// I0: setOfChangedIndices = [0..N), if allIndicesChanged
// = changedIndices, otherwise
// I1: If setOfChangedIndices==[0..N) then allIndicesChanged=true and changedIndices = {}
// where N is defined by the caller
//
// In particular this means that changedIndices can't have size N
// because if all indices were changed, then changedIndices = {}, by I1
struct ChangedIndicesImpl
{
bool allIndicesChanged = true;
flatcache::set<ArrayIndex> changedIndices;
ChangedIndicesImpl(size_t N)
{
if (N == 0)
allIndicesChanged = true;
else
allIndicesChanged = false;
}
// Create the singleton set {index}
ChangedIndicesImpl(ArrayIndex index, size_t N)
{
if (index == 0 && N == 1)
{
// Maintain invariant I0
allIndicesChanged = true;
}
else
{
// Maintain invariant I0
changedIndices = { index };
allIndicesChanged = false;
}
}
void dirtyAll()
{
// Maintain invariant I1
allIndicesChanged = true;
changedIndices.clear();
}
void insert(size_t index, size_t N)
{
CARB_ASSERT(index < N);
// If all indices already changed, then inserting an index has no
// effect
if (allIndicesChanged)
return;
changedIndices.insert(index);
// Maintain invariant I1
if (changedIndices.size() == N)
{
dirtyAll();
}
}
void decrementN(size_t newN)
{
if (allIndicesChanged)
return;
changedIndices.erase(newN);
if (changedIndices.size() == newN)
allIndicesChanged = true;
}
void erase(size_t index, size_t N)
{
CARB_ASSERT(index < N);
if (allIndicesChanged)
{
allIndicesChanged = false;
// Make a sorted list of integers [0..N) \ index
changedIndices.v.resize(N - 1);
size_t dest = 0;
for (size_t i = 0; i != index; i++)
{
changedIndices.v[dest++] = i;
}
for (size_t i = index + 1; i != N; i++)
{
changedIndices.v[dest++] = i;
}
return;
}
changedIndices.erase(index);
}
bool contains(size_t index)
{
if (allIndicesChanged)
return true;
return changedIndices.contains(index);
}
};
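// Illustrative sketch (not in the original source) of the invariants above:
#if 0
ChangedIndicesImpl c(/*N=*/3); // allIndicesChanged == false, empty set
c.insert(0, 3);
c.insert(1, 3); // changedIndices == {0, 1}
c.insert(2, 3); // size reached N, so invariant I1 collapses the set:
                // allIndicesChanged == true, changedIndices == {}
#endif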
struct ArrayAndDirtyIndices
{
SpanC array; // We use SpanC instead of gsl::span<const uint8_t> to allow casting to array of correct type
std::vector<ChangedIndicesImpl*> changedIndicesForEachListener; // This is empty if change tracking is not enabled for this attribute
};
// Bucket vectors and their attribute arrays are public, so users can iterate
// over them directly using for loops.
// For users that prefer opaque iterators, we provide View.
struct View;
// FlatCache doesn't need all metadata in UsdAttribute, just the attribute's
// type, size in bytes, whether it is an array, and if it is an array, the
// size of each elements in bytes
struct Typeinfo
{
size_t size;
bool isArray;
size_t arrayElemSize;
};
// FlatCache stores a map from attribute names (Tokens) to their type and
// size.
using TypeToInfo = HashMap<TypeC, Typeinfo, std::hash<TypeC>, std::equal_to<TypeC>, AllocFunctor, FreeFunctor>;
// By default, an attribute's value is not in the cache, and flags == eNone
// Once the user reads a value, ePresent is true
// Once the user writes a value, eDirty is true
enum class Flags
{
eNone = 0,
ePresent = 1,
eDirty = 2
};
// Operators for combining Flags
constexpr enum Flags operator|(const enum Flags a, const enum Flags b)
{
return (enum Flags)(uint32_t(a) | uint32_t(b));
}
constexpr enum Flags operator&(const enum Flags a, const enum Flags b)
{
return (enum Flags)(uint32_t(a) & uint32_t(b));
}
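// Example (illustrative): combining and testing flags with the operators above.
#if 0
Flags f = Flags::ePresent | Flags::eDirty;
bool isDirty = (f & Flags::eDirty) == Flags::eDirty; // true
#endif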
struct BucketChangesImpl
{
// Which attributes changed
gsl::span<const AttrNameAndType> changedAttributes;
// For each attribute, which prims changed?
std::vector<ConstChangedIndicesC> changedIndices;
gsl::span<const Path> pathArray;
// Which indices contain newly added prims?
gsl::span<const size_t> addedIndices;
};
struct PrimBucketListImpl
{
flatcache::set<BucketId> buckets;
std::vector<BucketChangesImpl> changes;
void clear()
{
buckets.clear();
changes.clear();
}
};
using SerializationCache = HashMap<uint64_t, std::string>;
using DeserializationCache = HashMap<std::string, pxr::SdfPath>;
// Now we've defined the basic types, we can define the type of FlatCache.
//
// Abstractly, FlatCache maps each Path to the UsdAttributes of the UsdPrim
// at that path. So the type of FlatCache is "PathToAttributesMap".
struct PathToAttributesMap
{
struct MirroredArray
{
private:
std::vector<uint8_t> cpuArray;
public:
Platform& platform;
TypeC type;
Typeinfo typeinfo;
uint8_t* gpuArray;
size_t gpuCapacity; // Amount of memory allocated at gpuArray in bytes
std::vector<void*> d3dArrays; // Actually vector of Buffer*
size_t count;
bool usdValid;
bool cpuValid;
bool gpuValid;
bool gpuAllocedWithCuda;
using AttributeMutex = carb::thread::mutex;
AttributeMutex attributeMutex;
MirroredArray(Platform& platform_, const TypeC &type, const Typeinfo& typeinfo) noexcept;
~MirroredArray();
MirroredArray(const MirroredArray& other) = delete;
MirroredArray& operator=(const MirroredArray& other) noexcept;
MirroredArray(MirroredArray&& other) noexcept;
MirroredArray& operator=(MirroredArray&& other) noexcept;
friend void swap(MirroredArray& a, MirroredArray& b) noexcept;
inline bool isArrayOfArray() const
{
CARB_ASSERT((typeinfo.arrayElemSize != 0) == typeinfo.isArray);
return typeinfo.isArray;
}
inline MirroredArray* getValuesArray()
{
return this;
}
void resize(size_t byteCount)
{
// CPU
// At the moment, CPU always resizes, but eventually it will only
// resize if it is allocated and valid
// This will ensure that GPU temp data is never allocated on CPU
cpuArray.resize(byteCount);
// Don't need to resize GPU here, because it is deferred until next
// copy to/from GPU mem
}
// GPU resize that preserves existing contents
// This is used by addPath and addAttributes
void resizeGpu(omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx,
size_t byteCount,
size_t elemSize)
{
if (!computeAPI || !computeCtx)
return;
bool capacitySufficient = (byteCount <= gpuCapacity);
if (!capacitySufficient)
{
void* oldGpuArray = gpuArray;
size_t oldByteCount = gpuCapacity;
gpuArray = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemSize));
gpuCapacity = byteCount;
if (oldGpuArray)
{
using omni::gpucompute::MemcpyKind;
computeAPI->memcpyAsync(*computeCtx, gpuArray, oldGpuArray, oldByteCount, MemcpyKind::deviceToDevice);
computeAPI->freeAsync(*computeCtx, oldGpuArray);
}
}
}
size_t size() const
{
return cpuArray.size();
}
uint8_t* cpuData()
{
return cpuArray.data();
}
const uint8_t* cpuData() const
{
return cpuArray.data();
}
void clear()
{
cpuArray.clear();
}
};
using ScalarAttributeArray = MirroredArray;
struct ArrayAttributeArray
{
enum class MirroredArrays : uint8_t
{
Values,
ElemCounts,
CpuElemCounts,
GpuElemCounts,
GpuPtrs,
Count
};
ArrayAttributeArray(Platform& platform_, const TypeC& type, const Typeinfo &typeinfo) noexcept;
~ArrayAttributeArray();
ArrayAttributeArray(const ArrayAttributeArray& other) = delete;
ArrayAttributeArray& operator=(const ArrayAttributeArray& other) noexcept;
ArrayAttributeArray(ArrayAttributeArray&& other) noexcept;
ArrayAttributeArray& operator=(ArrayAttributeArray&& other) noexcept;
friend void swap(ArrayAttributeArray& a, ArrayAttributeArray& b) noexcept;
inline MirroredArray* getValuesArray()
{
return &values;
}
MirroredArray values;
MirroredArray elemCounts;
MirroredArray cpuElemCounts;
MirroredArray gpuElemCounts;
MirroredArray gpuPtrs;
};
// DO NOT generalize this static_assert using globally named defines for magic numbers.
// We intentionally sprinkle static_assert on hardcoded sizes around this file to increase friction when changing
// the struct definition. Any change to ArrayAttributeArray requires evaluating multiple locations that rely on
// keeping in sync with the struct. Having each of these be hardcoded comparions forces future authors to
// individually evaluate each dependent site for correctness. If the comparison is generalized, future authors could
// simply adjust the global definition without examining every dependent routine, which might lead to errors.
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
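// Clarifying note (added): for an array-valued attribute, each element of
// "values" is a pointer to that prim's inner array; "elemCounts" holds the
// element count requested by the user, "cpuElemCounts"/"gpuElemCounts" hold
// the capacities actually allocated on each device, and "gpuPtrs" mirrors the
// inner-array pointers for the GPU, as consumed by enableD3dGpuRead/Write above.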
struct Changes
{
// changedAttributes and changesIndices together implement an ordered
// map, from attribute to changed indices.
// changedAttributes is a flatcache::set, which is a sorted std::vector.
//
// To lookup an element of the map, find the index, i, of the key in
// changedAttributes, then read the value from changedIndices[i]
//
// changedAttributes and changedIndices must have the same size.
//
// TODO: make a general ordered_map class based on flatcache::set
flatcache::set<AttrNameAndType> changedAttributes;
std::vector<ChangedIndicesImpl> changedIndices;
//
// New elements are stored in a set
//
flatcache::set<ArrayIndex> addedIndices;
void setDirty(const AttrNameAndType& nameAndType, size_t index, size_t maxIndex)
{
auto& keys = changedAttributes.v;
auto& values = changedIndices;
auto insertIter = lower_bound(keys.begin(), keys.end(), nameAndType);
ptrdiff_t insertIndex = insertIter - keys.begin();
bool found = (insertIter != keys.end() && !(nameAndType < *insertIter));
if (found)
{
values[insertIndex].insert(index, maxIndex);
}
else
{
keys.insert(insertIter, nameAndType);
values.insert(values.begin() + insertIndex, ChangedIndicesImpl(index, maxIndex));
}
}
void dirtyAll(const AttrNameAndType& nameAndType, size_t maxIndex)
{
auto& keys = changedAttributes.v;
auto& values = changedIndices;
auto insertIter = lower_bound(keys.begin(), keys.end(), nameAndType);
ptrdiff_t insertIndex = insertIter - keys.begin();
bool found = (insertIter != keys.end() && !(nameAndType < *insertIter));
if (found)
{
values[insertIndex].dirtyAll();
}
else
{
keys.insert(insertIter, nameAndType);
ChangedIndicesImpl changedIndices(maxIndex);
changedIndices.dirtyAll();
values.insert(values.begin() + insertIndex, changedIndices);
}
}
void addNewPrim(size_t index)
{
addedIndices.insert(index);
}
void removePrim(size_t index)
{
//
// we just clean the index from the set
//
addedIndices.erase(index);
}
size_t getNewPrimCount()
{
return addedIndices.size();
}
};
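// Illustrative lookup sketch (hypothetical variables) for the parallel
// changedAttributes/changedIndices vectors that implement the ordered map:
#if 0
auto& keys = changes.changedAttributes.v;
auto it = std::lower_bound(keys.begin(), keys.end(), nameAndType);
if (it != keys.end() && !(nameAndType < *it))
{
    ChangedIndicesImpl& indices = changes.changedIndices[it - keys.begin()];
    bool dirty = indices.contains(elemIndex);
}
#endif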
// FlatCache buckets UsdPrims according to type and attributes, and
// BucketImpl stores the attribute values of a bucket's prims in
// structure-of-arrays (SOA) format.
// BucketImpl maps each attribute name (TokenC) to a MirroredArray, a
// contiguous byte array (vector<uint8_t>) and a bitfield encoding the
// validity/dirtiness of each mirror.
// Abstractly, flatcache data is addressed like a multidimensional array
// buckets[bucket][attributeName][path].
// FlatCache uses byte arrays instead of typed arrays, because USD files,
// scripts, and plugins can define custom types, so no dll or exe knows the
// complete set of types at the time of its compilation.
// BucketImpl also contains elemToPath to map each SOA element to the
// Path it came from.
struct BucketImpl
{
struct Hasher
{
size_t operator()(const AttrName& key) const
{
return hash_combine(hash(key.name), uint32_t(key.suffix));
}
};
struct KeyEqual
{
bool operator()(const AttrName& a, const AttrName& b) const
{
return (a.name == b.name) && (a.suffix == b.suffix);
}
};
using ScalarAttributeArrays = HashMap<AttrName, ScalarAttributeArray, Hasher, KeyEqual, AllocFunctor, FreeFunctor>;
using ArrayAttributeArrays = HashMap<AttrName, ArrayAttributeArray, Hasher, KeyEqual, AllocFunctor, FreeFunctor>;
Platform& platform;
ScalarAttributeArrays scalarAttributeArrays;
ArrayAttributeArrays arrayAttributeArrays;
std::vector<pxr::SdfPath> elemToPath;
// listenerIdToChanges entries are lazily created when the user changes
// an attribute, or when an attribute moves between buckets
HashMap<ListenerId, Changes, ListenerIdHasher, std::equal_to<ListenerId>, AllocFunctor, FreeFunctor> listenerIdToChanges;
template<typename CallbackT>
void forEachValueArray(CallbackT callback);
BucketImpl(Platform &platform_)
: platform(platform_)
, scalarAttributeArrays(0, Hasher(), KeyEqual(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, arrayAttributeArrays(0, Hasher(), KeyEqual(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, elemToPath()
, listenerIdToChanges(0, ListenerIdHasher(), std::equal_to<ListenerId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
{
}
~BucketImpl()
{
#if PROFILE_LARGE_BUCKET_COPIES
size_t count = elemToPath.size();
carb::profiler::ZoneId zoneId = carb::profiler::kUnknownZoneId;
if (1000 <= count)
zoneId = CARB_PROFILE_BEGIN(1, "Destroy Bucket %zu", count);
        scalarAttributeArrays.clear();
        arrayAttributeArrays.clear();
elemToPath.clear();
if (1000 <= count)
CARB_PROFILE_END(1, zoneId);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
}
BucketImpl(const BucketImpl&) = delete;
inline BucketImpl& operator=(const BucketImpl& other) noexcept
{
#if PROFILE_LARGE_BUCKET_COPIES
size_t count = other.elemToPath.size();
carb::profiler::ZoneId zoneId = carb::profiler::kUnknownZoneId;
if (1000 <= count)
zoneId = CARB_PROFILE_BEGIN(1, "Copy Bucket %zu", count);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
scalarAttributeArrays.clear();
scalarAttributeArrays.reserve(other.scalarAttributeArrays.size());
other.scalarAttributeArrays.forEach([this](const AttrName& name, const ScalarAttributeArray &otherArray) {
// construct new array with the current BucketImpl platform, but mimicing type of otherArray
ScalarAttributeArray *array;
scalarAttributeArrays.allocateEntry(name, &array);
new (array) ScalarAttributeArray(platform, otherArray.type, otherArray.typeinfo);
*array = otherArray;
});
arrayAttributeArrays.clear();
arrayAttributeArrays.reserve(other.arrayAttributeArrays.size());
other.arrayAttributeArrays.forEach([this](const AttrName& name, const ArrayAttributeArray &otherArray) {
// construct new array with the current BucketImpl platform, but mimicing type of otherArray
ArrayAttributeArray *array;
arrayAttributeArrays.allocateEntry(name, &array);
new (array) ArrayAttributeArray(platform, otherArray.values.type, otherArray.values.typeinfo);
*array = otherArray;
});
elemToPath = other.elemToPath;
listenerIdToChanges.clear();
listenerIdToChanges.reserve(other.listenerIdToChanges.size());
other.listenerIdToChanges.forEach([this](const ListenerId& listener, const Changes &otherChanges) {
Changes* changes;
VALIDATE_TRUE(listenerIdToChanges.allocateEntry(listener, &changes));
static_assert(std::is_copy_constructible<Changes>::value, "Expected listenerIdToChanges values to be copy-constructible");
new (changes) Changes(otherChanges);
});
bucket = other.bucket;
#if PROFILE_LARGE_BUCKET_COPIES
if (1000 <= count)
CARB_PROFILE_END(1, zoneId);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
return *this;
}
BucketImpl(BucketImpl&& other) noexcept = delete;
inline BucketImpl& operator=(BucketImpl&& other) noexcept
{
// this->platform = std::move(b.platform); // intentionally not move-assigning platform (we can't anyways, it's a ref)
this->scalarAttributeArrays = std::move(other.scalarAttributeArrays);
this->arrayAttributeArrays = std::move(other.arrayAttributeArrays);
this->elemToPath = std::move(other.elemToPath);
this->listenerIdToChanges = std::move(other.listenerIdToChanges);
this->bucket = std::move(other.bucket);
return *this;
}
const Bucket& GetBucket() const
{
return bucket;
}
//
// TODO: In the future we should support universal ref +
// move assignments; unfortunately Bucket doesn't follow
// the rule of 5 so that is unavailable to us currently.
//
void SetBucket(const Bucket& _bucket)
{
bucket = _bucket;
}
private:
//
// bucketImpl knows the Bucket it represents
//
Bucket bucket;
};
/**
* @struct BucketIdToImpl
*
* @brief Convenience data structure for quick bucket lookups
*
 * @details We want to avoid the cost of hashmap lookups where possible,
 *          because single-element lookups happen very frequently. This
 *          class tracks buckets in a plain vector, and keeps that vector
 *          as densely packed as possible without incurring the cost of
 *          moving buckets around.
*
* @notes 1) NOT threadsafe
*
* @todo possibly store the last and first valid so one can avoid un-needed
* iteration
* @todo provide an iterator
 * @todo make deleting buckets move lastFreeSlot back where appropriate so that
* it is always meaningful as "end()"
*/
struct BucketIdToImpl
{
// A reasonable first size for the number of buckets
// If one changed this number, they would have to update
// the constants in the unit tests "Check Bucket Growth"
static const int max_buckets_init = 1024;
/**
* @brief Initialize storage to the minimum size
 * We rely on C++ value-initialization of the vector's
 * elements (null pointers) for validity tracking
*
*/
BucketIdToImpl(Platform& platform)
: platform(platform)
, buckets(max_buckets_init)
, lastEmptySlot{ 0 }
{
}
~BucketIdToImpl()
{
clear();
}
/**
* @brief mimic emplace function for stl objects
*
 * @details This allows buckets to be moved into the storage after they have
 *          been created. For consistency with other APIs it returns a pair.
 *          It chooses the correct slot to store the bucket for you, keeping
 *          the storage as dense as possible, and claims the next open bucket.
 * @param bucketImpl a new bucket to be added to the storage; ownership is
 *                   taken by this call (the argument is moved from).
 *
 * @return <BucketId, BucketImpl&> a pair of the id for looking up the bucket
 *                   and a reference to the stored data (since the argument
 *                   was moved from)
*
*/
std::pair<BucketId, BucketImpl&> emplace(BucketImpl* bucketImpl)
{
CARB_ASSERT(bucketImpl);
BucketId bucketId = ClaimNextOpenBucket();
if (buckets[size_t(bucketId)])
{
platform.allocator.delete_(buckets[size_t(bucketId)]);
}
buckets[size_t(bucketId)] = std::move(bucketImpl);
return std::pair<BucketId, BucketImpl&>(bucketId, *buckets[(size_t)bucketId]);
}
/**
* @brief Erase the specified bucket
*
 * @details This actually forces deletion of the bucket object, and adds its
 *          id to the list of free slots so that the id is recycled before
 *          more are added at the end
*
* @param id : The id of the bucket to be deleted
*/
void erase(BucketId id)
{
if (size_t(id) < buckets.size() && buckets[size_t(id)])
{
// Ignoring optimization of if last slot is empty
freeSlots.push(id);
platform.allocator.delete_(buckets[size_t(id)]);
buckets[size_t(id)] = nullptr;
}
}
// Find the bucket at the requested slot; if no bucket exists
// then we return nullptr
/**
* @brief Find the bucket if it exists
*
* @param id : The id of the bucket to be found
*
 * @return If the bucket exists a pointer is returned, otherwise a null pointer is returned
*/
BucketImpl* find(BucketId id)
{
if (size_t(id)< buckets.size())
return buckets[size_t(id)];
return nullptr;
}
/**
* @brief Find the bucket if it exists (const)
*
* @param id : The id of the bucket to be found
*
 * @return If the bucket exists a const pointer is returned, otherwise a null pointer is returned
*/
const BucketImpl* find(BucketId slot) const
{
if (size_t(slot) < buckets.size())
return buckets[size_t(slot)];
return nullptr;
}
/**
* @brief Clear all the buckets
*
* @details This will force deletion of all the nodes and make the storage appear empty
*
*/
void clear()
{
for (uint64_t i = 0; i < size_t(lastEmptySlot); ++i)
{
if (buckets[i])
{
platform.allocator.delete_(buckets[i]);
buckets[i] = nullptr;
}
}
// no clear in std::queue so we swap with a new one
std::queue<BucketId>().swap(freeSlots);
lastEmptySlot.id = 0;
}
/**
* @brief get the possible end of the storage
*
 * @details This will return the last possible id for a bucket; however,
 *          it should be combined with a validity check (e.g. find returning
 *          non-null) to be safe
 *
 * @return The last "allocated" bucket id, which may not refer to a valid bucket
*/
size_t end() const
{
return size_t(lastEmptySlot);
}
/**
* @brief Support copy assignment.
*
* @note In order for this to be a valid copy it must be followed up
* by a call to PathToAttributesMap::BucketImplCopyArrays to
* correctly copy array-of-array data.
*
* @return Copy constructed buckets, without array-of-arrays set up
*/
BucketIdToImpl& operator=(const BucketIdToImpl& other)
{
// Array of free slots
this->freeSlots = other.freeSlots;
// Track the last empty slot
this->lastEmptySlot = other.lastEmptySlot;
//
// A bucketImpl is a struct that mainly contains a map
// to arrays which are of the data type MirroredArray.
// A MirroredArray has two states
// (1) It contains an array of data
// (2) It contains an array of arrays.
// In the case of (2) the array itself doesn't have enough
// information to make a copy of the array of arrays, so the
// copy constructor is overloaded and the structure around the
// array of arrays is loaded, but the actual copying of that data is
// pushed off to be done by the function BucketImplCopyArrays
// which is a member of PathToAttributesMap which is the only place
// that currently has enough information to make the copy
//
this->buckets.resize(other.buckets.size());
for (size_t i = 0; i < this->buckets.size(); ++i)
{
if (other.buckets[i])
{
if (!this->buckets[i])
{
this->buckets[i] = platform.allocator.new_<BucketImpl>(platform);
}
*this->buckets[i] = *other.buckets[i];
}
else if (this->buckets[i])
{
platform.allocator.delete_(this->buckets[i]);
this->buckets[i] = nullptr;
}
}
return *this;
}
/**
* @brief Resize the internals
*
* @details Note that resize will only grow, calling with a smaller size is a no-op
*
*/
void resize(size_t newSize)
{
if (newSize > buckets.size()) {
buckets.resize(newSize);
}
}
/**
 * @brief Claim a bucket by index. A reference is returned regardless of
 *        whether the slot was previously valid; the slot is marked valid
 *        and internals are updated where needed. If an index past the
 *        allocated range is requested, more memory is allocated.
 *
 * @note This should be used sparingly. It is intended for reconstructing
 *       one flatcache from another; doing things out of order can be
 *       expensive, and it is assumed that all external mappings are
 *       maintained by the claimer.
*
* @param id : The id of the bucket to be claimed
*
* @return A reference to the bucketImpl that was claimed
*/
BucketImpl& claim(BucketId id)
{
// grow if needed
if (size_t(id)>= buckets.size()) {
resize(size_t(id)+ 1);
}
// if the bucket is already valid we can just return access to it
if (!buckets[size_t(id)])
{
// otherwise we need to update accordingly
while (lastEmptySlot <= id) {
if (!buckets[size_t(lastEmptySlot)]) {
freeSlots.push(lastEmptySlot);
}
++lastEmptySlot;
}
buckets[size_t(id)] = platform.allocator.new_<BucketImpl>(platform);
}
CARB_ASSERT(buckets[size_t(id)]);
return *buckets[size_t(id)];
}
template<typename CallbackT>
void forEachValidBucket(CallbackT callback) const;
private:
/**
* @brief Get the next open bucket
*
* @note May invalidate references to existing buckets
*
*/
BucketId ClaimNextOpenBucket()
{
BucketId slot = lastEmptySlot;
if (freeSlots.size() != 0)
{
slot = freeSlots.front();
freeSlots.pop();
}
else
{
++lastEmptySlot;
if (size_t(lastEmptySlot) == buckets.size())
{
std::vector<BucketImpl*> newVector;
newVector.resize(buckets.size() * 2);
//
// A bucketImpl is a struct that mainly contains a map
// to arrays which are of the data type MirroredArray.
// A MirroredArray has two states
// (1) It contains an array of data
// (2) It contains an array of arrays.
// In the case of (2) the array itself doesn't have enough
// information to make a copy of the array of arrays, so the
// copy constructor is overloaded and the structure around the
// array of arrays is loaded, but the actual copying of that data is
// pushed off to be done by the function BucketImplCopyArrays
// which is a member of PathToAttributesMap which is the only place
// that currently has enough information to make the copy
//
// Since we cannot guarantee that someone will know to call
// the copyArrays function we enforce that data must be moved here.
//
for (size_t i = 0; i < buckets.size(); ++i)
{
newVector[i] = std::move(buckets[i]);
}
std::swap(newVector, buckets);
}
}
CARB_ASSERT(!buckets[size_t(slot)]);
buckets[size_t(slot)] = platform.allocator.new_<BucketImpl>(platform);
return slot;
}
Platform& platform;
// array of bucket impls
std::vector<BucketImpl*> buckets;
// Array of free slots
std::queue<BucketId> freeSlots;
// Track the last empty slot
BucketId lastEmptySlot;
};
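// Usage sketch for BucketIdToImpl (illustrative; assumes a valid Platform):
#if 0
BucketIdToImpl storage(platform);
BucketImpl* impl = platform.allocator.new_<BucketImpl>(platform);
auto idAndRef = storage.emplace(impl); // storage takes ownership of impl
BucketImpl* found = storage.find(idAndRef.first); // == &idAndRef.second
storage.erase(idAndRef.first); // deletes the bucket, recycles the id
#endif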
// Internally we convert Paths to uint64_t path ids using asInt().
// PathId is the domain of pathToBucketElem, defined below.
using PathId = PathC;
Platform& platform;
// Concretely, FlatCache is the following three maps:
// 1) Each path maps to a bucket, and an SOA index within that bucket.
// This level of indirection allows the user to delete prims and/or
// attributes without creating holes in the SOAs. Whenever the user
// deletes a prim, the cache moves the last SOA element to the deleted
// element, and updates the path to element map of the moved SOA element.
// 2) Buckets (sets of attribute names) map to BucketImpls, defined above.
// This allows the user to quickly get e.g. arrays of all the meshes,
// or all the meshes that have rigid body attributes.
// 3) pxr::TfType names map to TypeInfos, containing attribute type and size in bytes.
HashMap<PathId, std::pair<BucketId, ArrayIndex>, std::hash<PathId>, std::equal_to<PathId>, AllocFunctor, FreeFunctor> pathToBucketElem;
BucketIdToImpl buckets;
std::map<Bucket, BucketId> attrNameSetToBucketId;
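// Illustrative flow (added for clarity; assumes HashMap::find follows the
// two-argument out-pointer form used elsewhere in this header): reading one
// attribute of one prim goes through maps (1) and (2) above, e.g.
#if 0
const std::pair<BucketId, ArrayIndex>* elem;
if (pathToBucketElem.find(pathId, &elem)) // (1) path -> (bucket, index)
{
    BucketImpl* impl = buckets.find(elem->first); // (2) bucket id -> arrays
    ArrayIndex index = elem->second; // element index within each SOA array
}
#endif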
// Each listener has its own attrNamesToLog and enableChangeTracking
struct ChangeTrackerConfig
{
set<TokenC> attrNamesToLog;
bool changeTrackingEnabled = true;
};
HashMap<ListenerId, ChangeTrackerConfig, ListenerIdHasher, std::equal_to<ListenerId>, AllocFunctor, FreeFunctor> listenerIdToChangeTrackerConfig;
TypeToInfo typeToInfo;
UsdStageId usdStageId;
bool minimalPopulationDone = false; // 104 only - do not port this forward to 105+
Hierarchy stageHierarchy; // 104 only - do not port this forward to 105+
mutable bool apiLogEnabled = false;
// The rest of PathToAttributesMap is methods
size_t size();
void clear();
void printMirroredArray(const char* const label, const ScalarAttributeArray &array, const size_t* const arrayElemCount) const;
void print() const;
// Void* multiple attribute interface
void getArraysRdC(const void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount);
void getAttributesRdC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount);
void getAttributesRdGpuC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind);
void getArraysWrC(void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount);
void getAttributesWrC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount);
void getAttributesWrGpuC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind);
// Span interface
SpanC getArraySpanC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArraySpanRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArraySpanWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// Void* interface
void addAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value = nullptr);
void addArrayAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount);
void addAttributesToPrim(
const PathC& path, const std::vector<TokenC>& attrNames, const std::vector<TypeC>& typeCs);
void addAttributeC(const PathC& path,
const TokenC& attrName,
NameSuffix suffix,
TypeC type,
const void* value = nullptr);
void addArrayAttributeC(const PathC& path,
const TokenC& attrName,
NameSuffix suffix,
TypeC type,
const void* value,
const size_t arrayElemCount);
void* getArrayC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getAttributeRdC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeWrC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getOrCreateAttributeWrC(const PathC& path, const TokenC& attrName, TypeC type);
// Type safe interface
template <typename T>
void addAttribute(const PathC& path, const TokenC& attrName, TypeC type, const T& value);
template <typename T>
void addSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const T& value);
template <typename T>
T* getArray(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getAttribute(const PathC& path, const TokenC& attrName);
template <typename T>
const T* getArrayRd(const Bucket& bucket, const TokenC& attrName);
template <typename T>
const T* getArrayRd(BucketId bucketId, const TokenC& attrName);
template <typename T>
const T* getAttributeRd(const PathC& path, const TokenC& attrName);
template <typename T>
T* getArrayWr(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getAttributeWr(const PathC& path, const TokenC& attrName);
template <typename T>
T* getAttributeWr(const PathC& path, const TokenC& attrName, NameSuffix suffix);
void removeAttribute(const PathC& path, const TokenC& attrName);
void removeSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix);
/** @brief Destroy all attributes with matching names from prim at given path
*
* @path[in] Path to the prim holding the attribute
* @attrNames[in] Attribute name array
*
*/
void removeAttributesFromPath(const PathC& path, const std::vector<TokenC>& attrNames);
ValidMirrors getAttributeValidBits(
const PathC& path,
const TokenC& attrName,
ArrayAttributeArray::MirroredArrays subArray = ArrayAttributeArray::MirroredArrays::Values) const;
// Accessors for element count of array attributes
size_t* getArrayAttributeSize(const PathC& path, const TokenC& attrName);
const size_t* getArrayAttributeSizeRd(const PathC& path, const TokenC& attrName);
size_t* getArrayAttributeSizeWr(const PathC& path, const TokenC& attrName);
size_t* getArrayAttributeSizes(const Bucket& bucket, const TokenC& attrName);
const size_t* getArrayAttributeSizesRd(const Bucket& bucket, const TokenC& attrName);
size_t* getArrayAttributeSizesWr(const Bucket& bucket, const TokenC& attrName);
SpanC setArrayAttributeSizeAndGet(PathC path, const TokenC& attrName, size_t newSize);
SpanC setArrayAttributeSizeAndGet(BucketId bucketId, size_t elementIndex, const TokenC& attrName, size_t newSize);
// GPU can currently read, but not write, size of arrays
// This is because writing causes array to resize, and that's not currently supported on GPU
const size_t* getArrayAttributeSizeRdGpu(const PathC& path, const TokenC& attrName);
const size_t* getArrayAttributeSizesRdGpu(const Bucket& bucket, const TokenC& attrName);
// Void* CUDA GPU interface
SpanC getAttributeGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
ConstSpanC getAttributeRdGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeWrGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
void* getArrayGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// Span CUDA GPU interface
SpanC getArraySpanGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArraySpanRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArraySpanWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// Type safe CUDA GPU interface
template <typename T>
T* getAttributeGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
const T* getAttributeRdGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
T* getAttributeWrGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
T* getArrayGpu(const Bucket& bucket, const TokenC& attrName);
template <typename T>
const T* getArrayRdGpu(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getArrayWrGpu(const Bucket& bucket, const TokenC& attrName);
// D3D GPU interface
omni::gpucompute::GpuPointer getAttributeD3d(const PathC& path, const TokenC& attrName);
void* getArrayD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
// PathC methods
void addPath(const PathC& path, const Bucket& bucket = {});
void renamePath(const PathC& oldPath, const PathC& newPath);
set<AttrNameAndType> getTypes(const PathC& path) const;
size_t getAttributeCount(const PathC& path) const;
TypeC getType(const PathC& path, const TokenC& attrName) const;
void removePath(const PathC& path);
size_t count(const PathC& path) const;
size_t count(const PathC& path, const TokenC& attrName) const;
// Type methods
void addType(TypeC type, Typeinfo typeInfo);
Typeinfo getTypeInfo(TypeC type) const;
// Bucket methods
BucketId addBucket(const Bucket& bucket);
void addAttributeC(
const Bucket& bucket, const TokenC& attrName, TypeC type, const void* value = nullptr);
void addArrayAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount);
template <typename T>
void addAttribute(const Bucket& bucket, const TokenC& attrName, TypeC type, const T& value);
void removeAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type);
void printBucket(const Bucket& bucket) const;
void printBucketName(const Bucket& bucketTypes, BucketId bucketId) const;
void printBucketNames() const;
void printBucketNamesAndTypes() const;
flatcache::set<BucketId> findBuckets(const set<AttrNameAndType>& all,
const set<AttrNameAndType>& any = {},
const set<AttrNameAndType>& none = {}) const;
View getView(const set<AttrNameAndType>& inc, const set<AttrNameAndType>& exc = {});
size_t getElementCount(const Bucket& bucket) const;
const PathC* getPathArray(const Bucket& bucket) const;
TypeC getType(const Bucket& bucket, const TokenC& attrName) const;
/** @brief Destroy all attributes with matching names from a given Bucket - array version of removeAttributeC
*
* @bucket[in] Bucket to remove attributes from
* @attrNames[in] Attribute name array
*
*/
void removeAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames);
// BucketId methods
size_t getElementCount(BucketId bucketId) const;
SpanC getArrayC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArrayRdC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArrayWrC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getOrCreateArrayWrC(
BucketId bucketId, const TokenC& attrName, TypeC type, NameSuffix suffix = NameSuffix::none);
SpanC getArrayGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArrayRdGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArrayWrGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
BucketId getBucketId(const PathC& path) const;
ArrayPointersAndSizesC getArrayAttributeArrayWithSizes(BucketId bucketId, const TokenC& attrName);
ConstArrayPointersAndSizesC getArrayAttributeArrayWithSizesRd(BucketId bucketId, const TokenC& attrName);
ArrayPointersAndSizesC getArrayAttributeArrayWithSizesWr(BucketId bucketId, const TokenC& attrName);
SpanSizeC getArrayAttributeSizes(BucketId bucketId, const TokenC& attrName);
ConstSpanSizeC getArrayAttributeSizesRd(BucketId bucketId, const TokenC& attrName);
ConstSpanSizeC getArrayAttributeSizesRdGpu(BucketId bucketId, const TokenC& attrName);
SpanSizeC getArrayAttributeSizesWr(BucketId bucketId, const TokenC& attrName);
ConstPathCSpan getPathArray(BucketId bucketId) const;
Bucket getNamesAndTypes(BucketId bucketId) const;
// std::vector<Bucket> methods
std::vector<size_t> getElementCounts(const std::vector<Bucket>& buckets) const;
template <typename T>
std::vector<const T*> getArraysRd(const std::vector<Bucket>& buckets, const TokenC& attrName);
template <typename T>
std::vector<T*> getArraysWr(const std::vector<Bucket>& buckets, const TokenC& attrName);
template <typename T>
std::vector<T*> getArrays(const std::vector<Bucket>& buckets, const TokenC& attrName);
// BucketImpl methods
size_t getElementCount(const BucketImpl& bucketImpl) const;
// Allow default construction
PathToAttributesMap(const PlatformId& platformId = PlatformId::Global);
// Disallow copying
PathToAttributesMap(const PathToAttributesMap&) = delete;
// Allow copy assignment. This is used by StageWithHistory
PathToAttributesMap& operator=(const PathToAttributesMap&);
// Allow move construction and assignment
PathToAttributesMap(PathToAttributesMap&& other) noexcept = default;
PathToAttributesMap& operator=(PathToAttributesMap&& other) noexcept = default;
~PathToAttributesMap();
// Methods that are currently used in flatcache.cpp
// TODO: Make private
struct ArrayOfArrayInfo
{
// For each array, the element count requested by the user
size_t* arraySizeArray;
// For each array, the element count allocated on the CPU
MirroredArray* arrayCpuCapacityArray;
// For each array, the element count allocated on the GPU
MirroredArray* arrayGpuCapacityArray;
// For each array, the GPU data
MirroredArray* arrayGpuPtrArray;
};
struct ConstArrayOfArrayInfo
{
// For each array, the element count requested by the user
const MirroredArray * arraySizeArray;
// For each array, the element count allocated on the CPU
const MirroredArray* arrayCpuCapacityArray;
// For each array, the element count allocated on the GPU
const MirroredArray* arrayGpuCapacityArray;
// For each array, the GPU data
const MirroredArray* arrayGpuPtrArray;
};
// enableCpuWrite() is used in flatcache.cpp, so needs to be public
// TODO: move that code from there to here
void enableCpuWrite(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
// getArrayOfArrayInfo() is used in flatcache.cpp, so needs to be public
// TODO: move that code from there to here
ArrayOfArrayInfo getArrayOfArrayInfo(Typeinfo typeInfo, BucketImpl& bucketImpl, TokenC attrName);
ArrayOfArrayInfo getArrayOfArrayInfo(ArrayAttributeArray &arrayAttributeArray);
ConstArrayOfArrayInfo getArrayOfArrayInfo(Typeinfo typeInfo, const BucketImpl& bucketImpl, TokenC attrName) const;
ConstArrayOfArrayInfo getArrayOfArrayInfo(const ArrayAttributeArray &arrayAttributeArray) const;
void bucketImplCopyScalarAttributeArray(ScalarAttributeArray &dest, const ScalarAttributeArray &src);
void bucketImplCopyArrayAttributeArray(BucketImpl& destBucketImpl, const AttrName& destName, ArrayAttributeArray &dest, const ArrayAttributeArray &src);
void bucketImplCopyArrays(BucketImpl& destBucketImpl,
BucketId destBucketId,
const BucketImpl& srcBucketImpl,
BucketId srcBucketId,
const carb::flatcache::set<AttrNameAndType>& attrFilter = {});
// Serialization
struct Serializer
{
uint8_t *p;
uint8_t *buf;
uint8_t *end;
uint64_t bytesWritten; // increments even if attempts are made to write past end
bool overflowed;
void init(uint8_t *const _buf, uint8_t *const end);
bool writeBytes(const uint8_t *const src, uint64_t size);
bool writeString(const char* const s, const size_t len);
bool writeString(const std::string &s);
template<typename T>
bool write(const T &t);
};
struct Deserializer
{
const uint8_t *p;
const uint8_t *buf;
const uint8_t *end;
uint64_t bytesRead; // increments even if attempts are made to read past end
bool overflowed;
void init(const uint8_t *const _buf, const uint8_t *const end);
bool readBytes(uint8_t *const dst, uint64_t size);
bool readString(std::string &s);
template<typename T>
bool read(T &t);
};
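// Round-trip sketch (illustrative; the buffer size is hypothetical):
#if 0
uint8_t buf[256];
PathToAttributesMap::Serializer out;
out.init(buf, buf + sizeof(buf));
out.write<uint64_t>(42u);
PathToAttributesMap::Deserializer in;
in.init(buf, buf + sizeof(buf));
uint64_t v;
in.read(v); // v == 42; in.overflowed stays false
#endif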
uint64_t serializeScalarAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out);
bool deserializeScalarAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in);
uint64_t serializeArrayAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ArrayAttributeArray& srcArrayAttributeArray, Serializer &out);
bool deserializeArrayAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in);
PrimBucketListImpl getChanges(ListenerId listener);
void popChanges(ListenerId listener);
private:
// Device is used by getArrayC
enum class Device
{
eCPU = 0,
eCudaGPU = 1,
eD3dVkGPU = 2
};
static inline constexpr ArrayOfArrayInfo ScalarArrayOfArrayInfo()
{
return ArrayOfArrayInfo{ nullptr, nullptr, nullptr, nullptr };
}
// TODO: Now that EnableReadFn and EnableWriteFn can have the same type, should they just be one alias?
using EnableReadFn = void (PathToAttributesMap::*)(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuData);
using EnableWriteFn = void (PathToAttributesMap::*)(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuData);
struct IOConfig
{
EnableReadFn enableRead;
EnableWriteFn enableWrite;
EnableReadFn enableRdPtrForWrite;
Device device;
PtrToPtrKind ptrToPtrKind;
inline IOConfig& withEnableRead(EnableReadFn _enableRead)
{
enableRead = _enableRead;
return *this;
}
inline IOConfig& withEnableWrite(EnableWriteFn _enableWrite)
{
enableWrite = _enableWrite;
return *this;
}
inline IOConfig& withEnableRdPtrForWrite(EnableReadFn _enableRdPtrForWrite)
{
enableRdPtrForWrite = _enableRdPtrForWrite;
return *this;
}
inline IOConfig& withDevice(Device _device)
{
device = _device;
return *this;
}
inline IOConfig& withPtrToPtrKind(PtrToPtrKind _ptrToPtrKind)
{
ptrToPtrKind = _ptrToPtrKind;
return *this;
}
};
void serializeMirroredArrayMetadata(const AttrName& srcName, MirroredArray &srcValuesArray, Serializer &out);
template<typename ArraysT, typename ArraysMapT>
void deserializeMirroredArrayMetadata(Platform& platform, ArraysMapT& arraysMap, AttrName &destName, Typeinfo *&typeInfo, ArraysT *&destArray, Deserializer &in);
BucketImpl& addAttributeInternal(BucketImpl& prevBucketImpl, const Bucket& bucket, const TokenC& attrName, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount);
void fillAttributeInternal(BucketImpl& bucketImpl, const AttrName& name, const size_t startIndex, const size_t endIndex, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount, MirroredArray *const valuesArray, ArrayAttributeArray *const arrayAttributeArray);
void addAttributeInternal(const PathC& path, const TokenC& attrNameC, const NameSuffix nameSuffix, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount);
bool findArrayAttributeArrayForPath(const PathC& path,
const TokenC& attrName,
size_t& outElementIndex,
BucketImpl*& outBucketImpl,
ArrayAttributeArray*& outArrayAttributeArray);
bool findArrayAttributeArrayForBucketId(const BucketId bucketId,
const TokenC& attrName,
BucketImpl*& outBucketImpl,
ArrayAttributeArray*& outArrayAttributeArray);
void allocElement(ScalarAttributeArray &scalar);
void allocElement(ArrayAttributeArray &vector);
size_t allocElement(BucketImpl& bucketImpl);
void allocElementForMove(BucketImpl& srcBucketImpl, const ArrayOfArrayInfo &srcAoaInfo, const AttrName& name, MirroredArray &destArray, MirroredArray *const srcArray);
size_t allocElementForMove(BucketImpl& destBucketImpl, BucketImpl& srcBucketImpl, const PathC& path);
void addElementToTrackers(size_t elemIndex, BucketImpl& bucketImpl);
void makeSrcValidIfDestValid(MirroredArray& srcArray,
BucketImpl& srcBucketImpl,
const ArrayOfArrayInfo& srcAoaInfo,
const MirroredArray& destArray,
const AttrName& name);
void moveElementBetweenBuckets(const PathC& path, BucketId destBucketId, BucketId srcBucketId, const Bucket& destBucket);
void moveElementScalarData(ScalarAttributeArray &destArray, const size_t destElemIndex, const ScalarAttributeArray &srcArray, const size_t srcElemIndex);
void moveElementArrayData(ArrayAttributeArray &destArray, const size_t destElemIndex, const ArrayAttributeArray &srcArray, const size_t srcElemIndex);
void moveElement(BucketImpl& destBucket, size_t destElemIndex, BucketImpl& srcBucket, size_t srcElemIndex);
void destroyElement(BucketId bucketId, size_t elemIndex, bool destroyDataPointedTo);
ArrayAndDirtyIndices getArraySpanC(BucketId bucketId, TokenC attrName, const IOConfig &io, NameSuffix suffix = NameSuffix::none);
ArrayAndDirtyIndices getArraySpanC(MirroredArray& array, const AttrName& name, const ArrayOfArrayInfo& aoa, BucketImpl& bucketImpl, const IOConfig &io);
void enableCpuReadImpl(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData,
bool printWarnings = true);
void enableCpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableCpuReadIfValid(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableGpuWrite(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
void enableD3dGpuRead(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* gpuPointerArray);
void enableD3dGpuWrite(MirroredArray& array,
const size_t* elemToArraySize,
MirroredArray* elemToArrayCpuCapacity,
MirroredArray* elemToArrayGpuCapacity,
MirroredArray* elemToArrayGpuData);
static inline constexpr IOConfig CpuReadConfig()
{
return IOConfig{
&PathToAttributesMap::enableCpuRead, // enableRead
nullptr, // enableWrite
nullptr, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CpuWriteConfig()
{
return IOConfig{
nullptr, // enableRead
&PathToAttributesMap::enableCpuWrite, // enableWrite
&PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CpuReadWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableCpuRead, // enableRead
&PathToAttributesMap::enableCpuWrite, // enableWrite
&PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
// TODO: This probably needs to go away, it only exists to turn off "printWarnings"
static inline constexpr IOConfig CpuReadIfValidWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableCpuReadIfValid, // enableRead
&PathToAttributesMap::enableCpuWrite, // enableWrite
&PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
Device::eCPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CudaReadConfig()
{
return IOConfig{
&PathToAttributesMap::enableGpuRead, // enableRead
nullptr, // enableWrite
nullptr, // enableRdPtrForWrite
Device::eCudaGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CudaWriteConfig()
{
return IOConfig{
nullptr, // enableRead
&PathToAttributesMap::enableGpuWrite, // enableWrite
&PathToAttributesMap::enableGpuRead, // enableRdPtrForWrite
Device::eCudaGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig CudaReadWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableGpuRead, // enableRead
&PathToAttributesMap::enableGpuWrite, // enableWrite
&PathToAttributesMap::enableGpuRead, // enableRdPtrForWrite
Device::eCudaGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig D3dVkReadConfig()
{
return IOConfig{
&PathToAttributesMap::enableD3dGpuRead, // enableRead
nullptr, // enableWrite
nullptr, // enableRdPtrForWrite
Device::eD3dVkGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig D3dVkWriteConfig()
{
return IOConfig{
nullptr, // enableRead
&PathToAttributesMap::enableD3dGpuWrite, // enableWrite
&PathToAttributesMap::enableD3dGpuRead, // enableRdPtrForWrite
Device::eD3dVkGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
static inline constexpr IOConfig D3dVkReadWriteConfig()
{
return IOConfig{
&PathToAttributesMap::enableD3dGpuRead, // enableRead
&PathToAttributesMap::enableD3dGpuWrite, // enableWrite
&PathToAttributesMap::enableD3dGpuRead, // enableRdPtrForWrite
Device::eD3dVkGPU, // device
PtrToPtrKind::eNotApplicable // ptrToPtrKind
};
    }
std::tuple<bool, BucketId, size_t> getPresentAndBucketAndElement(const PathC& path) const;
SpanC getArrayElementPtr(SpanC array, size_t bucketElement) const;
ConstSpanC getArrayElementPtr(ConstSpanC array, size_t bucketElement) const;
void destructiveResizeIfNecessary(uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount);
void destructiveResizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx);
void resizeIfNecessary(uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, TypeC type);
void resizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx);
void allocGpuMemIfNecessary(PathToAttributesMap::MirroredArray& array,
size_t byteCount,
size_t elemSize,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx);
std::pair<BucketId, BucketImpl&> findOrCreateBucket(const Bucket& bucket);
void eraseBucket(const Bucket& bucket);
BucketId findBucketId(const Bucket& bucket);
std::tuple<BucketId, ArrayIndex> getBucketAndArrayIndex(const PathC& path) const;
std::tuple<BucketId, ArrayIndex> addAttributeGetBucketAndArrayIndex(
const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type);
void addAttributesToBucket(
const PathC& path,
const std::vector<TokenC>& attrNames,
const std::vector<TypeC>& typeCs);
void setArrayDirty(ArrayAndDirtyIndices& arrayAndDirtyIndices);
void setArrayElementDirty(ArrayAndDirtyIndices& arrayAndDirtyIndices, size_t elemIndex);
BucketImpl& addAttributeC(BucketImpl& bucketImpl,
const Bucket& bucket,
const TokenC& attrName,
TypeC ctype,
const void* value = nullptr);
BucketImpl& addArrayAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount);
void checkInvariants();
    /**
     * @brief Internal debug function
     *
     * @details Loops over all the array-valued attributes, checking that the
     * invariants hold. Currently it enforces only:
     * (I1) if an element's desired size matches its CPU capacity and is
     * non-zero, then its CPU array pointer must not be null
     *
     * @return true if any invariant violation was found, false otherwise
     */
bool __validateArrayInvariants() const;
};
// PathToAttributesMap doesn't depend on USD for tokens or paths.
// However, it's useful to be able to see the USD text representation of tokens
// and paths when debugging. Set ENABLE_USD_DEBUGGING to 1 to enable that, then
// use toTfToken() to convert a TokenC to pxr::TfToken and toSdfPath() to
// convert a PathC to pxr::SdfPath.
inline const pxr::TfToken& toTfToken(const TokenC& token)
{
return reinterpret_cast<const pxr::TfToken&>(token);
}
inline const pxr::SdfPath& toSdfPath(const PathC& path)
{
return reinterpret_cast<const pxr::SdfPath&>(path);
}
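// Example (illustrative) of using these helpers from a debugger or a log
// statement; 'token' and 'path' stand in for values obtained elsewhere:
//
//   const pxr::TfToken& tf = toTfToken(token);   // tf.GetText() -> attribute name
//   const pxr::SdfPath& sdf = toSdfPath(path);   // sdf.GetText() -> prim path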
// Query result
struct View
{
PathToAttributesMap* path2attrsMap;
std::vector<Bucket> buckets;
std::vector<size_t> bucketElemCounts;
};
using BucketImpl = PathToAttributesMap::BucketImpl;
// The rest of this file is methods
// Returns the number of Paths known to the cache
inline size_t PathToAttributesMap::size()
{
return pathToBucketElem.size();
}
// Delete all data in the cache, meaning all the buckets and the map
// from paths to buckets.
inline void PathToAttributesMap::clear()
{
buckets.clear();
pathToBucketElem.clear();
attrNameSetToBucketId.clear();
}
template<typename CallbackT>
inline void PathToAttributesMap::BucketIdToImpl::forEachValidBucket(CallbackT callback) const
{
BucketId id{ 0 };
for (size_t i = 0; i < buckets.size(); ++i, ++id)
{
if (buckets[i])
{
callback(id, *buckets[i]);
}
}
}
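// Example (illustrative, from member code of PathToAttributesMap, where the
// 'buckets' container is accessible): count non-empty buckets, skipping
// invalid slots, in the same style as print() below.
//
//   size_t nonEmpty = 0;
//   buckets.forEachValidBucket([&nonEmpty](BucketId, const BucketImpl& impl) {
//       if (impl.elemToPath.size() != 0)
//           ++nonEmpty;
//   });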
inline void PathToAttributesMap::printMirroredArray(const char* const label, const ScalarAttributeArray &array, const size_t* const arrayElemCount) const
{
auto printValue = [](const uint8_t *const data, const size_t size)
{
if (!data)
{
printf("<nullptr>");
}
else
{
if (size <= sizeof(uint8_t))
{
printf("u8=%u, d8=%d, c=%c", *data, *(const int8_t*)data, *(const char*)data);
}
else if (size <= sizeof(uint16_t))
{
printf("u16=%u, d16=%d", *(const uint16_t*)data, *(const int16_t*)data);
}
else if (size <= sizeof(uint32_t))
{
printf("u32=%u, d32=%d, float=%f", *(const uint32_t*)data, *(const int32_t*)data, *(const float*)data);
}
else if (size <= sizeof(uint64_t))
{
printf("u64=%" PRIu64 ", d64=%" PRId64 ", double=%f, ptr=0x%p", *(const uint64_t*)data, *(const int64_t*)data, *(const double*)data, *(void**)data);
}
else
{
printf("\n");
for (size_t i = 0; i < size; i += 16)
{
printf(" %06zx: ", i);
for (size_t j = 0; j < 16; ++j)
{
if (i + j < size)
{
printf("%02x ", data[i + j]);
}
else
{
printf(" ");
}
}
printf(" ");
for (size_t j = 0; j < 16; j++)
{
if (i + j < size)
{
printf("%c", isprint(data[i + j]) ? data[i + j] : '.');
}
}
printf("\n");
}
}
}
};
printf(" %s (type %d)[count %zu]:\n", label, array.type.type, array.count);
const Typeinfo &typeinfo = array.typeinfo;
const size_t elemSize = typeinfo.size;
printf(" cpuValid=%d 0x%p\n", array.cpuValid, array.cpuData());
if (array.cpuValid)
{
for (size_t elem = 0; elem < array.count; ++elem)
{
printf(" [%5zu]: ", elem);
const uint8_t *const elemData = array.cpuData() + elem * elemSize;
printf("0x%p ", elemData);
if (arrayElemCount)
{
CARB_ASSERT(typeinfo.isArray);
const uint8_t* const base = *((const uint8_t **)elemData);
printf(" => 0x%p", base);
for (size_t i = 0; i < arrayElemCount[elem]; ++i)
{
printf("\n [%5zu]: ", i);
const uint8_t* const arrayData = base ? base + i * typeinfo.arrayElemSize : nullptr;
printValue(arrayData, typeinfo.arrayElemSize);
}
}
else
{
printValue(elemData, elemSize);
}
printf("\n");
}
}
printf(" gpuValid=%d 0x%p\n", array.gpuValid, array.gpuArray);
printf(" usdValid=%d\n", array.usdValid);
}
// Print the cache, specifically the Paths and the UsdAttributes they map to
// (but not the values of the attributes currently)
inline void PathToAttributesMap::print() const
{
auto va = [](auto ...params) -> const char* {
static char tmp[1024];
#ifdef _WIN32
_snprintf_s(tmp, sizeof(tmp), params...);
#else
snprintf(tmp, sizeof(tmp), params...);
#endif
        return tmp;
};
std::cout << "(== PathToAttributesMap::print() begin ==)\n";
buckets.forEachValidBucket([this, va](const BucketId bucketId, const BucketImpl& bucketImpl) {
printf("bucket [%zu]:\n", size_t(bucketId));
if (!bucketImpl.elemToPath.size())
{
printf(" <no elements>\n");
}
else
{
for (size_t elem = 0; elem < bucketImpl.elemToPath.size(); ++elem)
{
printf(" elem [%5zu]: \"%s\"\n", elem, bucketImpl.elemToPath[elem].GetText());
}
}
if (bucketImpl.scalarAttributeArrays.empty() && bucketImpl.arrayAttributeArrays.empty())
{
printf(" <no attributes>\n");
}
else
{
bucketImpl.scalarAttributeArrays.forEach([this, &va](const AttrName& name, const ScalarAttributeArray &array) {
printMirroredArray(va("%s \"%s\"", "sattr", toTfToken(name.name).GetText()), array, nullptr);
});
bucketImpl.arrayAttributeArrays.forEach([this, &va](const AttrName& name, const ArrayAttributeArray &array) {
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "values"), array.values, (const size_t*)array.cpuElemCounts.cpuData());
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "elemCounts"), array.elemCounts, nullptr);
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "cpuElemCounts"), array.cpuElemCounts, nullptr);
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "gpuElemCounts"), array.gpuElemCounts, nullptr);
printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "gpuPtrs"), array.gpuPtrs, nullptr);
});
}
});
std::cout << "(== PathToAttributesMap::print() end ==)\n\n";
}
#define ENABLE_LOG 0
inline void log(const char* format, ...)
{
#if ENABLE_LOG
va_list args;
va_start(args, format);
vprintf(format, args);
va_end(args);
#endif
}
inline void PathToAttributesMap::addType(TypeC type, Typeinfo typeInfo)
{
Typeinfo *v;
typeToInfo.allocateEntry(type, &v);
*v = typeInfo;
}
inline Typeinfo PathToAttributesMap::getTypeInfo(TypeC type) const
{
const Typeinfo* typeinfo;
if (typeToInfo.find(type, &typeinfo))
{
return *typeinfo;
}
else
{
return Typeinfo();
}
}
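// Sketch of the type-registration round trip (illustrative; the Typeinfo
// member names shown are assumptions based on how this file reads them):
//
//   Typeinfo info;                  // describes e.g. a float3 scalar attribute
//   info.size = 3 * sizeof(float);
//   info.isArray = false;
//   map.addType(myFloat3TypeC, info);
//   CARB_ASSERT(map.getTypeInfo(myFloat3TypeC).size == 3 * sizeof(float));
//
// Note that getTypeInfo() returns a default-constructed Typeinfo when the type
// is unknown, so callers cannot distinguish "unregistered" from "empty".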
inline BucketId PathToAttributesMap::addBucket(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
{
// Create bucket
auto bucketIdAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
auto bucketAndId = attrNameSetToBucketId.emplace(bucket, bucketIdAndImpl.first);
const Bucket& addedBucket = bucketAndId.first->first;
BucketImpl& bucketImpl = bucketIdAndImpl.second;
bucketImpl.SetBucket(bucket);
// Make array for each type
for (const AttrNameAndType& b : addedBucket)
{
const Type attrType = b.type;
const Token& attrName = b.name;
const NameSuffix& suffix = b.suffix;
const TypeC attrTypeC = TypeC(attrType);
const Typeinfo* typeinfo;
if (typeToInfo.find(attrTypeC, &typeinfo))
{
AttrName name{ attrName, suffix };
if (typeinfo->isArray)
{
ArrayAttributeArray* ptr;
bucketImpl.arrayAttributeArrays.allocateEntry(std::move(name), &ptr);
new (ptr) ArrayAttributeArray(platform, attrTypeC, *typeinfo);
}
else
{
ScalarAttributeArray* ptr;
bucketImpl.scalarAttributeArrays.allocateEntry(std::move(name), &ptr);
new (ptr) ScalarAttributeArray(platform, attrTypeC, *typeinfo);
}
}
else
{
std::cout << "Error: Typeinfo for " << attrType << " not found. Please add it using addType()." << std::endl;
}
}
return bucketIdAndImpl.first;
}
else
{
return iter->second;
}
}
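// addBucket is idempotent: if the attribute-name set already maps to a
// BucketId, that id is returned unchanged; otherwise a bucket is created with
// one SoA array per attribute of registered type. Sketch (Bucket-building API
// assumed for illustration):
//
//   Bucket bucket;  // a set of AttrNameAndType
//   bucket.insert(AttrNameAndType(Type(BaseDataType::eFloat, 3, 0),
//                                 pointsToken, NameSuffix::none));
//   BucketId a = map.addBucket(bucket);
//   BucketId b = map.addBucket(bucket);  // same id as 'a'; no new bucket made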
// Multiple attribute methods
inline void PathToAttributesMap::getAttributesRdC(const void** attrsOut,
const PathC* paths,
const TokenC* attrNames,
size_t attrCount)
{
// TODO: make optimized version instead of calling getAttributeRdC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeRdC(paths[i], attrNames[i]).ptr;
}
}
inline void PathToAttributesMap::getAttributesRdGpuC(const void** attrsOut,
const PathC* paths,
const TokenC* attrNames,
size_t attrCount,
PtrToPtrKind ptrToPtrKind)
{
// TODO: make optimized version instead of calling getAttributeRdGpuC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeRdGpuC(paths[i], attrNames[i], ptrToPtrKind).ptr;
}
}
inline void PathToAttributesMap::getArraysRdC(const void** attrsOut,
const Bucket& bucket,
const TokenC* attrNames,
size_t attrCount)
{
// TODO: make optimized version instead of calling getArrayRdC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getArrayRdC(bucket, attrNames[i]);
}
}
inline void PathToAttributesMap::getAttributesWrC(void** attrsOut,
const PathC& path,
const TokenC* attrNames,
size_t attrCount)
{
// TODO: make optimized version instead of calling getAttributeWrC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeWrC(path, attrNames[i]).ptr;
}
}
inline void PathToAttributesMap::getAttributesWrGpuC(void** attrsOut,
const PathC& path,
const TokenC* attrNames,
size_t attrCount,
PtrToPtrKind ptrToPtrKind)
{
// TODO: make optimized version instead of calling getAttributeWrGpuC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getAttributeWrGpuC(path, attrNames[i], ptrToPtrKind).ptr;
}
}
inline void PathToAttributesMap::getArraysWrC(void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount)
{
// TODO: make optimized version instead of calling getArrayWrC
for (size_t i = 0; i != attrCount; i++)
{
attrsOut[i] = getArrayWrC(bucket, attrNames[i]);
}
}
// Algorithm:
// Check whether bucket already has a bucketId
// If it does:
// Check whether bucketId has a bucketImpl
// If it does:
// return (bucketId, bucketImpl)
// Else:
// Print error message
// return (bucketId, empty bucketImpl)
// Else:
// Allocate a bucketId
// attrNameSetToBucketId += (bucket->bucketId)
// buckets += (bucketId->empty bucketImpl)
//
inline std::pair<BucketId, BucketImpl&> PathToAttributesMap::findOrCreateBucket(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool foundBucketAndId = (iter != attrNameSetToBucketId.end());
BucketId bucketId;
if (foundBucketAndId)
{
bucketId = iter->second;
auto implPtr = buckets.find(bucketId);
if (implPtr)
{
return { bucketId, *implPtr };
}
else
{
// This is an error, but make an impl so that we can return gracefully
CARB_LOG_ERROR("BucketId->impl not found");
// Allocate an impl and id->impl mapping and then set the
// attrNameSetToBucketId to the slot of the new impl
auto idAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
iter->second = idAndImpl.first;
idAndImpl.second.SetBucket(bucket);
return idAndImpl;
}
}
// Allocate an impl and place in vector
auto idAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
// Store bucket->Id mapping
attrNameSetToBucketId.emplace(bucket, idAndImpl.first);
idAndImpl.second.SetBucket(bucket);
return idAndImpl;
}
inline void PathToAttributesMap::eraseBucket(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool foundBucketAndId = (iter != attrNameSetToBucketId.end());
BucketId bucketId;
if (foundBucketAndId)
{
bucketId = iter->second;
auto implPtr = buckets.find(bucketId);
if (implPtr)
{
buckets.erase(bucketId);
}
else
{
CARB_LOG_ERROR("BucketId->impl not found");
}
attrNameSetToBucketId.erase(bucket);
}
else
{
// Nothing to do
}
}
// Add an attribute to all elements of a bucket
// Note that this might cause a merge with an existing bucket
//
// Here are the maps we have to update:
// pathToBucketElem :: path -> (bucketId, arrayIndex)
// buckets :: bucketId -> bucketImpl
// attrNameSetToBucketId :: bucket-> bucketId
//
inline BucketImpl& PathToAttributesMap::addAttributeC(BucketImpl& bucketImpl,
const Bucket& bucket,
const TokenC& attrName,
TypeC ctype,
const void* value)
{
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
if (typeinfo.isArray && value)
{
CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
return bucketImpl;
}
return addAttributeInternal(bucketImpl, bucket, attrName, ctype, value, typeinfo, 0);
}
inline BucketImpl& PathToAttributesMap::addArrayAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount)
{
CARB_ASSERT(!arrayElemCount || value);
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
return addAttributeInternal(bucketImpl, bucket, attrName, ctype, value, typeinfo, arrayElemCount);
}
// Add an attribute to all elements of a bucket
inline void PathToAttributesMap::addAttributeC(
const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value)
{
const auto iter = attrNameSetToBucketId.find(bucket);
if (iter != attrNameSetToBucketId.end())
{
const BucketId bucketId = iter->second;
BucketImpl *const implPtr = buckets.find(bucketId);
if (implPtr)
{
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
if (typeinfo.isArray && value)
{
CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
return;
}
addAttributeInternal(*implPtr, bucket, attrName, ctype, value, typeinfo, 0);
}
}
}
inline void PathToAttributesMap::addArrayAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount)
{
CARB_ASSERT(!arrayElemCount || value);
const auto iter = attrNameSetToBucketId.find(bucket);
if (iter != attrNameSetToBucketId.end())
{
const BucketId bucketId = iter->second;
BucketImpl *const implPtr = buckets.find(bucketId);
if (implPtr)
{
// TODO: should we warn on missing type?
const Typeinfo& typeinfo = getTypeInfo(ctype);
addAttributeInternal(*implPtr, bucket, attrName, ctype, value, typeinfo, arrayElemCount);
}
}
}
template <typename T>
void PathToAttributesMap::addAttribute(
const Bucket& bucket, const TokenC& attrName, TypeC type, const T& value)
{
APILOGGER("addAttribute", apiLogEnabled, attrName);
// TODO: check that type is compatible
return addAttributeC(bucket, attrName, type, &value);
}
inline size_t PathToAttributesMap::getElementCount(const BucketImpl& bucketImpl) const
{
return bucketImpl.elemToPath.size();
}
inline size_t PathToAttributesMap::getElementCount(BucketId bucketId) const
{
const auto implPtr = buckets.find(bucketId);
if (implPtr)
{
return implPtr->elemToPath.size();
}
return 0;
}
inline size_t PathToAttributesMap::getElementCount(const Bucket& bucket) const
{
const auto iter = attrNameSetToBucketId.find(bucket);
if (iter != attrNameSetToBucketId.end())
{
BucketId bucketId = iter->second;
return getElementCount(bucketId);
}
return 0;
}
inline PathToAttributesMap::ArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(Typeinfo typeInfo,
BucketImpl& bucketImpl,
TokenC attrName)
{
if (typeInfo.isArray)
{
ArrayAttributeArray *array;
if (bucketImpl.arrayAttributeArrays.find(AttrName{ attrName, NameSuffix::none }, &array))
{
return getArrayOfArrayInfo(*array);
}
}
return { nullptr, nullptr, nullptr, nullptr };
}
inline PathToAttributesMap::ArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(ArrayAttributeArray &arrayAttributeArray)
{
MirroredArray *const arraySizeArray = &arrayAttributeArray.elemCounts;
MirroredArray *const arrayCpuCapacityArray = &arrayAttributeArray.cpuElemCounts;
MirroredArray *const arrayGpuCapacityArray = &arrayAttributeArray.gpuElemCounts;
MirroredArray *const arrayGpuPtrArray = &arrayAttributeArray.gpuPtrs;
return { (size_t*)arraySizeArray->cpuData(), arrayCpuCapacityArray, arrayGpuCapacityArray, arrayGpuPtrArray };
}
inline PathToAttributesMap::ConstArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(Typeinfo typeInfo,
const BucketImpl& bucketImpl,
TokenC attrName) const
{
if (typeInfo.isArray)
{
const ArrayAttributeArray *array;
if (bucketImpl.arrayAttributeArrays.find(AttrName{ attrName, NameSuffix::none }, &array))
{
return getArrayOfArrayInfo(*array);
}
}
return { nullptr, nullptr, nullptr, nullptr };
}
inline PathToAttributesMap::ConstArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(const ArrayAttributeArray &arrayAttributeArray) const
{
const MirroredArray *const arraySizeArray = &arrayAttributeArray.elemCounts;
const MirroredArray *const arrayCpuCapacityArray = &arrayAttributeArray.cpuElemCounts;
const MirroredArray *const arrayGpuCapacityArray = &arrayAttributeArray.gpuElemCounts;
const MirroredArray *const arrayGpuPtrArray = &arrayAttributeArray.gpuPtrs;
return { arraySizeArray, arrayCpuCapacityArray, arrayGpuCapacityArray, arrayGpuPtrArray };
}
inline std::vector<size_t> PathToAttributesMap::getElementCounts(const std::vector<Bucket>& buckets) const
{
size_t bucketCount = buckets.size();
std::vector<size_t> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getElementCount(buckets[i]);
}
return retval;
}
inline void PathToAttributesMap::addElementToTrackers(size_t elemIndex, BucketImpl& bucketImpl)
{
// Update change trackers
// We allocate them lazily, so we have to iterate over listenerIdToChangeTrackerConfig
// then allocate bucketImpl.listenerIdToChanges if necessary
listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &elemIndex](ListenerId& listenerId, ChangeTrackerConfig& config) {
if (config.changeTrackingEnabled)
{
// Allocate changes if necessary
Changes* changes;
if (bucketImpl.listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes();
}
changes->addNewPrim(elemIndex);
}
});
}
inline void PathToAttributesMap::allocElement(ScalarAttributeArray &scalar)
{
const size_t allocSize = scalar.typeinfo.size;
const size_t newSize = scalar.size() + allocSize;
scalar.resize(newSize);
// Only resize GPU mirror if it was previously allocated
if (scalar.gpuCapacity != 0)
{
scalar.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, newSize, allocSize);
}
scalar.count++;
}
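// An ArrayAttributeArray bundles five parallel MirroredArrays (see the
// static_assert below): 'values' holds one pointer per bucket element to that
// element's array data, 'elemCounts' the logical size, 'cpuElemCounts' and
// 'gpuElemCounts' the respective mirror capacities, and 'gpuPtrs' the
// per-element GPU data pointers. Allocating an element must extend all five
// in lockstep.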
inline void PathToAttributesMap::allocElement(ArrayAttributeArray &arrayAttributeArray)
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
allocElement(arrayAttributeArray.values);
allocElement(arrayAttributeArray.elemCounts);
allocElement(arrayAttributeArray.cpuElemCounts);
allocElement(arrayAttributeArray.gpuElemCounts);
allocElement(arrayAttributeArray.gpuPtrs);
// For array-valued attributes, initialize CPU and GPU element counts
reinterpret_cast<size_t*>(arrayAttributeArray.elemCounts.cpuData())[arrayAttributeArray.elemCounts.count-1] = 0;
arrayAttributeArray.elemCounts.cpuValid = true;
reinterpret_cast<size_t*>(arrayAttributeArray.cpuElemCounts.cpuData())[arrayAttributeArray.cpuElemCounts.count - 1] = 0;
arrayAttributeArray.cpuElemCounts.cpuValid = true;
reinterpret_cast<size_t*>(arrayAttributeArray.gpuElemCounts.cpuData())[arrayAttributeArray.gpuElemCounts.count - 1] = 0;
arrayAttributeArray.gpuElemCounts.cpuValid = true;
}
inline size_t PathToAttributesMap::allocElement(BucketImpl& bucketImpl)
{
// I moved this here to support old-style ArrayBase::resize
// TODO: Now that ArrayBase is gone, check whether we can move it back
const size_t element = bucketImpl.elemToPath.size();
bucketImpl.elemToPath.emplace_back(); // Allocate an empty path, it gets set later
bucketImpl.scalarAttributeArrays.forEach([this, &bucketImpl](const AttrName& name, ScalarAttributeArray &array) {
allocElement(array);
CARB_UNUSED(bucketImpl);
CARB_ASSERT(array.count == bucketImpl.elemToPath.size());
});
bucketImpl.arrayAttributeArrays.forEach([this, &bucketImpl](const AttrName& name, ArrayAttributeArray &array) {
allocElement(array);
CARB_UNUSED(bucketImpl);
CARB_ASSERT(array.values.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.elemCounts.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.cpuElemCounts.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.gpuElemCounts.count == bucketImpl.elemToPath.size());
CARB_ASSERT(array.gpuPtrs.count == bucketImpl.elemToPath.size());
});
addElementToTrackers(element, bucketImpl);
return element;
}
// CPU and GPU valid bits are per SoA array, not per-prim per-attribute.
// Suppose we have a prim with attr whose GPU mirror is not valid, and we want
// to add it to a bucket that has a valid GPU mirror of that attribute. What
// should we set the bucket array's gpuValid to after the add?
//
// Option 1: set bucket's gpuValid to false.
// If cpuValid were true for the bucket, then this would be inefficient but
// correct. But, if cpuValid were false, then we'd have to copy all the bucket's
// data from GPU to CPU to avoid invalidating the only valid copy of the data.
// That would be very inefficient for a bucket with a lot of prims, or an
// array of array-valued attributes.
//
// Option 2: set bucket's gpuValid to true.
// For the bucket plus our new element to be gpuValid, we need to make the new
// element gpuValid by copying it from CPU to GPU.
//
// We've chosen Option 2 as it is the most efficient and makeSrcValidIfDestValid
// implements it.
// The explanation above was for GPU mirrors, but it applies equally to CPU.
//
// We are changing the srcArray mirrors to match destArray, so
// counterintuitively destArray is const and srcArray is not.
inline void PathToAttributesMap::makeSrcValidIfDestValid(MirroredArray& srcArray,
BucketImpl& srcBucketImpl,
const ArrayOfArrayInfo& srcAoaInfo,
const MirroredArray& destArray,
const AttrName& name)
{
bool srcCpuValid = srcArray.cpuValid;
bool srcGpuValid = srcArray.gpuValid;
bool destCpuValid = destArray.cpuValid;
bool destGpuValid = destArray.gpuValid;
if (srcCpuValid && !srcGpuValid && destGpuValid)
{
// Possible states:
// srcCpu srcGpu destCpu destGpu
// 1 0 0 1
// 1 0 1 1
// With a valid CPU source, this will copy data to the GPU to make it valid
// We don't set dirty indices here because this method gives read-only access
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaReadConfig());
// srcCpu srcGpu destCpu destGpu
// 1 1 0 1
// 1 1 1 1
}
else if (!srcCpuValid && !srcGpuValid && !destCpuValid && destGpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 0 0 1
// Without a valid CPU source, just allocate memory so it can be "valid" even if not initialized
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 0 1 0 1
}
else if (!srcCpuValid && srcGpuValid && destCpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 1 1 0
// 0 1 1 1
// With a valid GPU source, this will copy data back to the CPU to make it valid
// We don't set dirty indices here because this method gives read-only access
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuReadConfig());
// srcCpu srcGpu destCpu destGpu
// 1 1 1 0
// 1 1 1 1
}
else if (!srcCpuValid && !srcGpuValid && destCpuValid && !destGpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 0 1 0
// Without a valid GPU source, just allocate memory so it can be "valid" even if not initialized
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 1 0 1 0
}
else if (!srcCpuValid && !srcGpuValid && destCpuValid && destGpuValid)
{
// srcCpu srcGpu destCpu destGpu
// 0 0 1 1
// Without a valid GPU source, just allocate memory so it can be "valid" even if not initialized
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 0 1 1 1
// This one clears gpuValid, because we assume that the user is going to write to it
// But, we're not passing the allocated pointer to the user so...
getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuWriteConfig());
// srcCpu srcGpu destCpu destGpu
// 1 0 1 1
// ..we can safely set gpuValid to true
srcArray.gpuValid = true;
// srcCpu srcGpu destCpu destGpu
// 1 1 1 1
}
}
inline void PathToAttributesMap::allocElementForMove(BucketImpl& srcBucketImpl, const ArrayOfArrayInfo &srcAoaInfo, const AttrName& name, MirroredArray &destArray, MirroredArray *const srcArray)
{
bool srcGpuAlloced = false;
if (srcArray)
{
makeSrcValidIfDestValid(*srcArray, srcBucketImpl, srcAoaInfo, destArray, name);
srcGpuAlloced = (srcArray->gpuCapacity != 0);
if (destArray.type != srcArray->type)
{
if (destArray.typeinfo.size != srcArray->typeinfo.size)
{
CARB_LOG_ERROR_ONCE("PathToAttributesMap (%p) contains attributes with duplicate name \"%s\" with different types and different per-element sizes (%zu vs %zu). Data will almost certainly become corrupted during request to move elements between buckets!", this, toTfToken(name.name).GetString().c_str(), destArray.typeinfo.size, srcArray->typeinfo.size);
}
else
{
CARB_LOG_WARN_ONCE("PathToAttributesMap (%p) contains attributes with duplicate name \"%s\" with different types but same per-element size. Data may become corrupted during request to move elements between buckets!", this, toTfToken(name.name).GetString().c_str());
}
}
}
const size_t allocSize = destArray.typeinfo.size;
const size_t newSize = destArray.size() + allocSize;
destArray.resize(newSize);
const bool destGpuAlloced = (destArray.gpuCapacity != 0);
if (srcGpuAlloced || destGpuAlloced)
{
destArray.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, destArray.size(), destArray.typeinfo.size);
}
destArray.count++;
}
// When moving elements between buckets we want to only allocate GPU storage if
// the source had a valid GPU mirror.
inline size_t PathToAttributesMap::allocElementForMove(BucketImpl& destBucketImpl,
BucketImpl& srcBucketImpl,
const PathC& path)
{
const size_t element = destBucketImpl.elemToPath.size();
destBucketImpl.elemToPath.emplace_back(); // Allocate an empty path, it gets set later
// Only allocate dest GPU mirror if src has GPU mirror
destBucketImpl.scalarAttributeArrays.forEach([this, &srcBucketImpl](const AttrName& name, ScalarAttributeArray &array) {
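        // Note: find() writes through &srcArray on success; on failure the
        // ternary discards the indeterminate value and yields nullptr. The
        // same pattern is used for arrayAttributeArrays below.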
ScalarAttributeArray *srcArray = srcBucketImpl.scalarAttributeArrays.find(name, &srcArray) ? srcArray : nullptr;
const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo();
allocElementForMove(srcBucketImpl, aoa, name, array, srcArray);
});
destBucketImpl.arrayAttributeArrays.forEach([this, &srcBucketImpl](const AttrName& name, ArrayAttributeArray &array) {
ArrayAttributeArray *srcArray = srcBucketImpl.arrayAttributeArrays.find(name, &srcArray) ? srcArray : nullptr;
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
const ArrayOfArrayInfo aoa = srcArray ? getArrayOfArrayInfo(*srcArray) : ScalarArrayOfArrayInfo();
allocElementForMove(srcBucketImpl, aoa, name, array.values, srcArray ? &srcArray->values : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.elemCounts, srcArray ? &srcArray->elemCounts : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.cpuElemCounts, srcArray ? &srcArray->cpuElemCounts : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.gpuElemCounts, srcArray ? &srcArray->gpuElemCounts : nullptr);
allocElementForMove(srcBucketImpl, aoa, name, array.gpuPtrs, srcArray ? &srcArray->gpuPtrs : nullptr);
// For array-valued attributes, initialize CPU and GPU element counts
reinterpret_cast<size_t*>(array.elemCounts.cpuData())[array.elemCounts.count - 1] = 0;
array.elemCounts.cpuValid = true;
reinterpret_cast<size_t*>(array.cpuElemCounts.cpuData())[array.cpuElemCounts.count - 1] = 0;
array.cpuElemCounts.cpuValid = true;
reinterpret_cast<size_t*>(array.gpuElemCounts.cpuData())[array.gpuElemCounts.count - 1] = 0;
array.gpuElemCounts.cpuValid = true;
});
addElementToTrackers(element, destBucketImpl);
return element;
}
// Array resize that does not preserve previous data
inline void PathToAttributesMap::destructiveResizeIfNecessary(uint8_t*& cpuData,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount)
{
    // Resize iff the capacity differs from the desired element count; since
    // this resize is destructive, shrinking also reallocates
if (capacity != desiredElemCount)
{
size_t byteCount = desiredElemCount * elemByteCount;
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
free(cpuData);
cpuData = reinterpret_cast<uint8_t*>(malloc(byteCount));
}
else if (platform.gpuCuda)
{
// Use page-locked memory CPU for CUDA
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuData);
platform.gpuCuda->hostAlloc(*platform.gpuCudaCtx, (void**)&cpuData, byteCount);
}
capacity = desiredElemCount;
}
}
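// How this differs from resizeIfNecessary() further below: this destructive
// variant is used when the old contents are about to be overwritten wholesale
// (e.g. before the full device-to-host copy in enableCpuReadImpl), so it can
// free-and-reallocate instead of allocate-copy-free.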
// Flatcache only stores POD types, with the following exceptions:
// eToken      (pxr::TfToken)
// eAsset      (flatcache::AssetPath, a pair of pxr::TfToken)
// eConnection (flatcache::Connection)
//
// The following code constructs an array of objects of one of these types,
// filling memory[newCpuData + oldByteCount .. newCpuData + newByteCount)
// It is called when enlarging arrays of such types
template <typename T>
void constructInPlace(uint8_t* newCpuData, size_t oldByteCount, size_t newByteCount)
{
T* begin = reinterpret_cast<T*>(newCpuData + oldByteCount);
T* end = reinterpret_cast<T*>(newCpuData + newByteCount);
for (T* current = begin; current != end; current++)
{
new (current) T;
}
}
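// Example (illustrative): growing a token array from 2 to 4 elements
// default-constructs the two new TfTokens at the tail of the new allocation:
//
//   constructInPlace<pxr::TfToken>(newCpuData,
//                                  2 * sizeof(pxr::TfToken),   // oldByteCount
//                                  4 * sizeof(pxr::TfToken));  // newByteCount
//
// resizeIfNecessary() below applies this to eToken, eAsset and eConnection.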
// We plan to move TfToken and AssetPath construction to IToken.
// Until we do we have to declare this here and depend on USD headers.
struct AssetPath
{
pxr::TfToken assetPath;
pxr::TfToken resolvedPath;
};
inline bool PathToAttributesMap::__validateArrayInvariants() const
{
bool encounteredFailure = false;
    // Loop over the valid buckets
    buckets.forEachValidBucket([&encounteredFailure](const BucketId, const BucketImpl& bucketImpl) {
        if (bucketImpl.elemToPath.size() == 0)
            return;
        // Loop over all the array-valued attributes
bucketImpl.arrayAttributeArrays.forEach([&encounteredFailure](const AttrName& name, const ArrayAttributeArray& local_array) {
const Typeinfo& typeInfo = local_array.values.typeinfo;
const size_t elemSize = typeInfo.size;
// only care about actual data
if (name.suffix != NameSuffix::none)
return;
// look up array info
const MirroredArray* arraySizeArray = &local_array.elemCounts;
const MirroredArray* arrayCpuCapacityArray = &local_array.cpuElemCounts;
            // Skip tags (zero-size types); only attributes with actual data are checked
if (elemSize != 0)
{
//number of elements
const size_t elemCount = local_array.values.count;
// pointers to data
const uint8_t* const* elemToArrayCpuData = reinterpret_cast<const uint8_t* const*>(local_array.values.cpuData());
for (size_t elem = 0; elem != elemCount; elem++)
{
// get the actual pointer
const uint8_t* cpuData = elemToArrayCpuData[elem];
                    // look up the desired element count and the cpu capacity
                    const size_t& desiredElemCount = reinterpret_cast<const size_t*>(arraySizeArray->cpuData())[elem];
                    const size_t& cpuCapacity = reinterpret_cast<const size_t*>(arrayCpuCapacityArray->cpuData())[elem];
if (cpuCapacity == desiredElemCount)
{
// we should have valid data
if (cpuCapacity != 0 && !cpuData) {
std::cout << "Invalid array name = " << toTfToken(name.name).GetString() << std::endl;
encounteredFailure = true;
}
}
}
}
});
    });
return encounteredFailure;
}
// Array resize that preserves previous data
inline void PathToAttributesMap::resizeIfNecessary(
uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, TypeC typeC)
{
// TODO: reduce number of reallocations by allocating capacity larger than size
// and not always reallocating when desiredElemCount<capacity
if (capacity < desiredElemCount)
{
size_t oldByteCount = capacity * elemByteCount;
size_t newByteCount = desiredElemCount * elemByteCount;
uint8_t* newCpuData = nullptr;
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
newCpuData = reinterpret_cast<uint8_t*>(malloc(newByteCount));
}
else if (platform.gpuCuda)
{
platform.gpuCuda->hostAlloc(*platform.gpuCudaCtx, reinterpret_cast<void**>(&newCpuData), newByteCount);
}
if (cpuData)
{
size_t copyByteCount = std::min(oldByteCount, newByteCount);
memcpy(newCpuData, cpuData, copyByteCount);
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
free(cpuData);
}
else if (platform.gpuCuda)
{
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuData);
}
}
// If type has a constructor, construct any new elements
if (oldByteCount < newByteCount)
{
Type type(typeC);
const uint8_t kScalar = 1;
const uint8_t kArray = 1;
if (type == Type(BaseDataType::eToken, kScalar, kArray))
{
constructInPlace<pxr::TfToken>(newCpuData, oldByteCount, newByteCount);
}
else if (type == Type(BaseDataType::eAsset, kScalar, kArray))
{
constructInPlace<flatcache::AssetPath>(newCpuData, oldByteCount, newByteCount);
}
else if (type == Type(BaseDataType::eConnection, kScalar, kArray))
{
constructInPlace<flatcache::Connection>(newCpuData, oldByteCount, newByteCount);
}
}
cpuData = newCpuData;
capacity = desiredElemCount;
}
}
inline void PathToAttributesMap::enableCpuReadImpl(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* gpuArrayDataArray,
bool printWarnings)
{
using omni::gpucompute::MemcpyKind;
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
bool& usingCuda = array.gpuAllocedWithCuda;
uint8_t* cpuArray = array.cpuData();
uint8_t*& gpuArray = array.gpuArray;
// If CPU copy is valid, nothing to do
// If GPU copy is valid, copy to CPU
// If USD copy is valid, copy to CPU
if (cpuValid)
{
// Nothing to do
}
else if (!cpuValid && gpuValid)
{
size_t byteCount = array.size();
// Select which API to use
omni::gpucompute::GpuCompute* computeAPI = nullptr;
omni::gpucompute::Context* computeCtx = nullptr;
if (usingCuda)
{
computeAPI = platform.gpuCuda;
computeCtx = platform.gpuCudaCtx;
}
        else
{
computeAPI = platform.gpuD3dVk;
computeCtx = platform.gpuD3dVkCtx;
}
const Typeinfo &typeinfo = array.typeinfo;
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (typeinfo.isArray)
{
size_t elemCount = array.count;
uint8_t** elemToArrayCpuData = reinterpret_cast<uint8_t**>(cpuArray);
uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuArrayDataArray->cpuData());
for (size_t elem = 0; elem != elemCount; elem++)
{
// Make sure that the dest (CPU) buffer is large enough
uint8_t*& cpuData = elemToArrayCpuData[elem]; // dest
const uint8_t* const& gpuData = elemToArrayGpuData[elem]; // src
size_t& destCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
destructiveResizeIfNecessary(cpuData, destCapacity, desiredElemCount, typeinfo.arrayElemSize);
// Copy from GPU to CPU
size_t copyByteCount = desiredElemCount * typeinfo.arrayElemSize;
if(gpuData)
computeAPI->memcpy(*computeCtx, cpuData, gpuData, copyByteCount, MemcpyKind::deviceToHost);
}
// Don't copy the outer array to CPU, because GPU is not allowed to change outer array
}
else
{
log("array values: from GPU\n");
computeAPI->memcpy(*computeCtx, cpuArray, gpuArray, byteCount, MemcpyKind::deviceToHost);
}
cpuValid = true;
}
else if (!cpuValid && usdValid)
{
        // TODO: read data lazily from USD
}
else
{
if (printWarnings)
CARB_LOG_WARN("No source has valid data array=%p usdValid=%i cpuValid=%i gpuValid=%i gpuAllocedWithCuda=%i", &array, array.usdValid, array.cpuValid, array.gpuValid, array.gpuAllocedWithCuda);
}
}
inline void PathToAttributesMap::enableCpuReadIfValid(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* gpuArrayDataArray)
{
enableCpuReadImpl(array, elemToArraySize, elemToArrayCpuCapacity, elemToArrayGpuCapacity, gpuArrayDataArray, false);
}
inline void PathToAttributesMap::enableCpuRead(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* gpuArrayDataArray)
{
enableCpuReadImpl(array, elemToArraySize, elemToArrayCpuCapacity, elemToArrayGpuCapacity, gpuArrayDataArray, true);
}
inline void PathToAttributesMap::enableCpuWrite(PathToAttributesMap::MirroredArray& array,
const size_t* elemToArraySize,
PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
PathToAttributesMap::MirroredArray* elemToArrayGpuData)
{
using omni::gpucompute::MemcpyKind;
bool& usdValid = array.usdValid;
bool& cpuValid = array.cpuValid;
bool& gpuValid = array.gpuValid;
const Typeinfo &typeinfo = array.typeinfo;
// Array-valued elements are lazily allocated, meaning they are only
// resized when write access is requested.
// Write access has been requested, so resize if necessary
std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
if (typeinfo.isArray)
{
size_t elemCount = array.count;
uint8_t** elemToArrayCpuData = reinterpret_cast<uint8_t**>(array.cpuData());
CARB_ASSERT(elemToArrayCpuCapacity->cpuValid);
for (size_t elem = 0; elem != elemCount; elem++)
{
uint8_t*& cpuData = elemToArrayCpuData[elem];
size_t& cpuCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
size_t desiredElemCount = elemToArraySize[elem];
resizeIfNecessary(cpuData, cpuCapacity, desiredElemCount, typeinfo.arrayElemSize, array.type);
}
}
// New state
usdValid = false;
cpuValid = true;
gpuValid = false;
}
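// enableCpuWrite leaves the array in the state where only the CPU mirror is
// authoritative (usdValid=false, cpuValid=true, gpuValid=false); the next GPU
// read re-uploads. It deliberately does not copy existing data back from the
// GPU first; read-modify-write callers go through CpuReadWriteConfig(), whose
// enableCpuRead runs before enableCpuWrite (see getArraySpanC below).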
inline ArrayAndDirtyIndices PathToAttributesMap::getArraySpanC(MirroredArray& array,
const AttrName& name,
const ArrayOfArrayInfo& aoa,
BucketImpl& bucketImpl,
const IOConfig& io)
{
const size_t elemCount = bucketImpl.elemToPath.size();
const Typeinfo& typeinfo = array.typeinfo;
const size_t elemSize = typeinfo.size;
log("begin getArrayC\n");
bool isTag = (elemSize == 0);
if (isTag)
{
// If is a tag, then array.data() will be zero, so set special value
// to distinguish from tag absent case
return { SpanC{ (uint8_t*)-1, elemCount, 0 }, {} };
}
// Read enable must come before write enable
if (io.enableRead)
{
(this->*io.enableRead)(array, aoa.arraySizeArray, aoa.arrayCpuCapacityArray, aoa.arrayGpuCapacityArray, aoa.arrayGpuPtrArray);
// If requesting GPU access to array-of-array, additionally
// enable array of GPU pointers for GPU read
if (typeinfo.isArray && io.device == Device::eCudaGPU)
{
(this->*io.enableRead)(*aoa.arrayGpuPtrArray, nullptr, nullptr, nullptr, nullptr);
}
}
if (io.enableWrite)
{
(this->*io.enableWrite)(array, aoa.arraySizeArray, aoa.arrayCpuCapacityArray, aoa.arrayGpuCapacityArray, aoa.arrayGpuPtrArray);
// If requesting GPU access to array-of-array, additionally
// enable array of GPU pointers for GPU _read_
// This is necessary because the pointers may have been
// reallocated on CPU, and the GPU needs to _read_ these new
// pointers
if (typeinfo.isArray && io.device == Device::eCudaGPU)
{
(this->*io.enableRdPtrForWrite)(*aoa.arrayGpuPtrArray, nullptr, nullptr, nullptr, nullptr);
}
}
// If CPU pointer requested
// return CPU pointer
// If GPU pointer requested and not array of array
// return GPU pointer
// If GPU pointer requested and array of array
// return GPU pointer to GPU pointer array
uint8_t* retPtr = nullptr;
if (io.device == Device::eCPU)
{
retPtr = array.cpuData();
}
else if (io.device == Device::eCudaGPU && !typeinfo.isArray)
{
retPtr = array.gpuArray;
}
else if (io.device == Device::eCudaGPU && typeinfo.isArray && io.ptrToPtrKind == PtrToPtrKind::eGpuPtrToGpuPtr)
{
retPtr = aoa.arrayGpuPtrArray->gpuArray;
}
else if (io.device == Device::eCudaGPU && typeinfo.isArray && io.ptrToPtrKind == PtrToPtrKind::eCpuPtrToGpuPtr)
{
retPtr = aoa.arrayGpuPtrArray->cpuData();
}
else if (io.device == Device::eD3dVkGPU && !typeinfo.isArray)
{
retPtr = array.gpuArray;
}
else if (io.device == Device::eD3dVkGPU && typeinfo.isArray)
{
retPtr = aoa.arrayGpuPtrArray->cpuData();
}
// If enabling write,
// for each enabled listener listening to this attribute
// if changedIndices exists
// add to vector
// else
// create changedIndices and add to vector
// return vector
// else
// return empty vector
std::vector<ChangedIndicesImpl*> changedIndicesForEachListener;
changedIndicesForEachListener.reserve(listenerIdToChangeTrackerConfig.size());
if (io.enableWrite)
{
// optimization because the cost to create attrNameAndType is non-trivial,
// but they are loop invariant so we should try to only do it once.
bool costlyInvariantsInitialized = false;
AttrNameAndType *const attrNameAndType = (AttrNameAndType*)alloca(sizeof(AttrNameAndType)); // stack-allocate here for scope, but lazily-initialize below
listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &costlyInvariantsInitialized, &name, &attrNameAndType, &array, &elemCount, &changedIndicesForEachListener](ListenerId& listenerId, ChangeTrackerConfig& config) {
// Create listener if it doesn't exist in bucket
Changes* changes;
if (bucketImpl.listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes();
}
if (config.changeTrackingEnabled && config.attrNamesToLog.contains(name.name))
{
if (!costlyInvariantsInitialized)
{
new (attrNameAndType) AttrNameAndType(Type(array.type), name.name, name.suffix);
costlyInvariantsInitialized = true;
}
auto iter = changes->changedAttributes.find(*attrNameAndType);
bool foundChangedIndices = (iter != changes->changedAttributes.end());
if (!foundChangedIndices)
{
// TODO: move this into a new ordered_map class
auto& keys = changes->changedAttributes.v;
auto& values = changes->changedIndices;
auto insertIter = lower_bound(keys.begin(), keys.end(), *attrNameAndType);
ptrdiff_t insertIndex = insertIter - keys.begin();
keys.insert(insertIter, *attrNameAndType);
values.insert(values.begin() + insertIndex, ChangedIndicesImpl(elemCount));
changedIndicesForEachListener.push_back(&values[insertIndex]);
}
else
{
ptrdiff_t attrIndex = iter - changes->changedAttributes.begin();
changedIndicesForEachListener.push_back(&changes->changedIndices[attrIndex]);
}
}
});
}
return { SpanC{ retPtr, elemCount, typeinfo.size }, changedIndicesForEachListener };
}
inline ArrayAndDirtyIndices PathToAttributesMap::getArraySpanC(BucketId bucketId,
TokenC attrName,
const IOConfig &io,
NameSuffix suffix)
{
BucketImpl *const bucketImpl = buckets.find(bucketId);
if (!bucketImpl)
{
return { SpanC{ nullptr, 0, 0 }, {} };
}
const AttrName name{ attrName, suffix };
{
ScalarAttributeArray *array;
if (bucketImpl->scalarAttributeArrays.find(name, &array))
{
const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo();
return getArraySpanC(*array, name, aoa, *bucketImpl, io);
}
}
{
ArrayAttributeArray *array;
if (bucketImpl->arrayAttributeArrays.find(name, &array))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*array);
return getArraySpanC(array->values, name, aoa, *bucketImpl, io);
}
}
return { SpanC{ nullptr, 0, 0 }, {} };
}
template <typename T>
inline const T* PathToAttributesMap::getArrayRd(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayRd", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getArrayRdC(bucket, attrName, NameSuffix::none));
}
template <typename T>
inline const T* PathToAttributesMap::getArrayRd(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayRd", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getArrayRdC(bucketId, attrName, NameSuffix::none).ptr);
}
template <typename T>
inline T* PathToAttributesMap::getArrayWr(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayWr", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayWrC(bucket, attrName, NameSuffix::none));
}
template <typename T>
inline T* PathToAttributesMap::getArray(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArray", apiLogEnabled, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getArrayC(bucket, attrName, NameSuffix::none));
}
template <typename T>
inline std::vector<const T*> PathToAttributesMap::getArraysRd(const std::vector<Bucket>& buckets, const TokenC& attrName)
{
size_t bucketCount = buckets.size();
std::vector<const T*> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getArrayRd<T>(buckets[i], attrName);
}
return retval;
}
template <typename T>
inline std::vector<T*> PathToAttributesMap::getArraysWr(const std::vector<Bucket>& buckets, const TokenC& attrName)
{
size_t bucketCount = buckets.size();
    std::vector<T*> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getArrayWr<T>(buckets[i], attrName);
}
return retval;
}
template <typename T>
inline std::vector<T*> PathToAttributesMap::getArrays(const std::vector<Bucket>& buckets, const TokenC& attrName)
{
size_t bucketCount = buckets.size();
std::vector<T*> retval(bucketCount);
for (size_t i = 0; i != bucketCount; i++)
{
retval[i] = getArray<T>(buckets[i], attrName);
}
return retval;
}
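// Example (illustrative; the token and element type are assumptions): typed
// bulk access over a bucket's SoA array.
//
//   const float* sizes = map.getArrayRd<float>(bucket, sizeAttrToken);
//   const size_t count = map.getElementCount(bucket);
//   float total = 0.0f;
//   for (size_t i = 0; i != count; i++)
//       total += sizes ? sizes[i] : 0.0f;
//
// None of these templates verify that T matches the attribute's registered
// type (see the TODOs above); a mismatched T silently reinterprets the bytes.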
inline BucketId PathToAttributesMap::findBucketId(const Bucket& bucket)
{
auto iter = attrNameSetToBucketId.find(bucket);
bool found = iter != attrNameSetToBucketId.end();
if (!found)
return { kInvalidBucketId };
return iter->second;
}
inline ConstSpanC PathToAttributesMap::getArraySpanRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanRdC", apiLogEnabled, attrName);
// Get read-only CPU access
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array;
}
inline const void* PathToAttributesMap::getArrayRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdC", apiLogEnabled, attrName);
// Get read-only CPU access
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array.ptr;
}
inline ConstSpanC PathToAttributesMap::getArrayRdC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayRdC", apiLogEnabled, attrName);
// Get read-only CPU access
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix);
// We don't set dirty indices here because this method gives read-only access
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getArraySpanWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanWrC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline void* PathToAttributesMap::getArrayWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array.ptr;
}
inline SpanC PathToAttributesMap::getArrayWrC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayWrC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline SpanC PathToAttributesMap::getOrCreateArrayWrC(
BucketId bucketId, const TokenC& attrName, TypeC type, NameSuffix suffix)
{
APILOGGER("getOrCreateArrayWrC", apiLogEnabled, attrName);
auto bucketImpl = buckets.find(bucketId);
if (!bucketImpl)
{
return SpanC{ nullptr, 0, 0 };
}
const AttrName name{ attrName, suffix };
ArrayOfArrayInfo aoa;
MirroredArray* array = nullptr;
const Typeinfo& typeinfo = getTypeInfo(type);
if (typeinfo.isArray)
{
ArrayAttributeArray* arrayAttributeArray;
if (!bucketImpl->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
Bucket bucket = getNamesAndTypes(bucketId);
bucketImpl = &addAttributeC(*bucketImpl, bucket, attrName, type);
const bool found = bucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &arrayAttributeArray);
CARB_ASSERT(found);
CARB_UNUSED(found);
}
array = &arrayAttributeArray->values;
aoa = getArrayOfArrayInfo(*arrayAttributeArray);
}
else
{
ScalarAttributeArray* scalarAttributeArray;
if (!bucketImpl->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
Bucket bucket = getNamesAndTypes(bucketId);
bucketImpl = &addAttributeC(*bucketImpl, bucket, attrName, type);
const bool found = bucketImpl->scalarAttributeArrays.find({ attrName, NameSuffix::none }, &scalarAttributeArray);
CARB_ASSERT(found);
CARB_UNUSED(found);
}
array = scalarAttributeArray;
aoa = ScalarArrayOfArrayInfo();
}
CARB_ASSERT(type == array->type);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*array, name, aoa, *bucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
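// A minimal usage sketch of getOrCreateArrayWrC (illustrative only; "p2a"
// names a PathToAttributesMap instance, and how the caller obtains the
// bucketId, token, and TypeC is assumed, not shown in this header):
//
// SpanC span = p2a.getOrCreateArrayWrC(bucketId, radiusToken, floatTypeC, NameSuffix::none);
// float* values = reinterpret_cast<float*>(span.ptr);
// for (size_t i = 0; i != span.elementCount; i++)
//     values[i] = 1.0f; // safe to write: the span was already marked dirty above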
inline SpanC PathToAttributesMap::getArraySpanC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArraySpanC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline void* PathToAttributesMap::getArrayC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayC", apiLogEnabled, attrName);
BucketId bucketId = findBucketId(bucket);
if (bucketId == kInvalidBucketId)
return nullptr;
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array.ptr;
}
inline void PathToAttributesMap::setArrayDirty(ArrayAndDirtyIndices& array)
{
for (ChangedIndicesImpl* listener : array.changedIndicesForEachListener)
{
listener->dirtyAll();
}
}
inline void PathToAttributesMap::setArrayElementDirty(ArrayAndDirtyIndices& array, size_t elemIndex)
{
for (ChangedIndicesImpl* listener : array.changedIndicesForEachListener)
{
listener->insert(elemIndex, array.array.elementCount);
}
}
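// setArrayDirty marks every element changed for every listener, which is the
// right granularity for whole-array writers (getArrayWrC and friends).
// setArrayElementDirty records a single changed element, which the per-path
// accessors below use so that change listeners see minimal dirty ranges.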
inline SpanC PathToAttributesMap::getArrayC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getArrayC", apiLogEnabled, attrName);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayDirty(arrayAndDirtyIndices);
return arrayAndDirtyIndices.array;
}
inline const PathC* PathToAttributesMap::getPathArray(const Bucket& bucket) const
{
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (found)
{
BucketId bucketId = iter->second;
const auto implPtr = buckets.find(bucketId);
if (implPtr)
{
return reinterpret_cast<const PathC*>(implPtr->elemToPath.data());
}
else
{
CARB_LOG_WARN_ONCE("Found in attrNameSetToBucketId, but didn't find BucketId %zu in buckets\n", size_t(bucketId));
}
}
CARB_LOG_ERROR("getPathArray: Bucket not found");
printBucket(bucket);
std::cout << "\n";
CARB_LOG_INFO("Bucket list:");
printBucketNamesAndTypes();
return nullptr;
}
inline ConstPathCSpan PathToAttributesMap::getPathArray(BucketId bucketId) const
{
const auto implPtr = buckets.find(bucketId);
if (implPtr)
{
const BucketImpl& bucketImpl = *implPtr;
return { reinterpret_cast<const Path*>(bucketImpl.elemToPath.data()), bucketImpl.elemToPath.size() };
}
else
{
CARB_LOG_WARN_ONCE("Found in attrNameSetToBucketId, but didn't find BucketId %zu in buckets\n", size_t(bucketId));
}
return { nullptr, 0 };
}
inline Bucket PathToAttributesMap::getNamesAndTypes(BucketId bucketId) const
{
auto implPtr = buckets.find(bucketId);
if (implPtr)
{
const BucketImpl& bucketImpl = *implPtr;
size_t maxCount = bucketImpl.scalarAttributeArrays.size() + bucketImpl.arrayAttributeArrays.size();
set<flatcache::AttrNameAndType> bucket;
bucket.reserve(maxCount);
bucketImpl.scalarAttributeArrays.forEach([&bucket](const AttrName& name, const ScalarAttributeArray& array) {
const TypeC& type = array.type;
if (name.suffix == NameSuffix::none || name.suffix == NameSuffix::connection)
{
AttrNameAndType attrNameAndType;
attrNameAndType.type = carb::flatcache::Type(type);
attrNameAndType.name = name.name;
attrNameAndType.suffix = name.suffix;
bucket.insert(attrNameAndType);
}
});
bucketImpl.arrayAttributeArrays.forEach([&bucket](const AttrName& name, const ArrayAttributeArray& array) {
const TypeC& type = array.values.type;
if (name.suffix == NameSuffix::none || name.suffix == NameSuffix::connection)
{
AttrNameAndType attrNameAndType;
attrNameAndType.type = Type(type);
attrNameAndType.name = name.name;
attrNameAndType.suffix = name.suffix;
bucket.insert(attrNameAndType);
}
});
return bucket;
}
else
{
CARB_LOG_ERROR_ONCE("getNamesAndTypes, bucketId %zu not found\n", size_t(bucketId));
return set<AttrNameAndType>();
}
}
inline void PathToAttributesMap::checkInvariants()
{
for (auto& bucketAndId : attrNameSetToBucketId)
{
const Bucket& correctBucket = bucketAndId.first;
BucketId bucketId = bucketAndId.second;
Bucket candidateBucket = getNamesAndTypes(bucketId);
if (candidateBucket.size() != correctBucket.size())
{
CARB_BREAK_POINT();
}
for (size_t i = 0; i != candidateBucket.size(); i++)
{
const AttrNameAndType& candidateNameAndType = candidateBucket.v[i];
const AttrNameAndType& correctNameAndType = correctBucket.v[i];
if (!(candidateNameAndType == correctNameAndType))
{
std::stringstream ss;
ss << "Candidate: " << Type(candidateNameAndType.type) << " "
<< Token(candidateNameAndType.name).getText() << toString(candidateNameAndType.suffix) << " "
<< " Correct: " << Type(correctNameAndType.type)
<< " " << Token(correctNameAndType.name).getText() << toString(correctNameAndType.suffix) << " "
<< "\n";
CARB_LOG_ERROR("%s", ss.str().c_str());
CARB_BREAK_POINT();
}
}
}
}
inline std::pair<bool, std::vector<AttrNameAndType>::const_iterator> findAttrNameAndType(const Bucket& bucket,
const TokenC& attrName)
{
#if 0
// Do O(log n) search of bucket for attrName, ignoring type
auto cmp = [](const AttrNameAndType& a, AttrNameAndTime b) {
return a.name < b;
};
auto i = lower_bound(bucket.begin(), bucket.end(), attrName, cmp);
// There can be multiple elements with same attrName, so check them all
while (i != bucket.end() && i->name == attrName && i->suffix != NameSuffix::none)
i++;
// At this point i is either at the end, or at the end of the elements with attrName, or pointing to an element with
// suffix==none
// If didn't get to the end, and didn't get to the end of the elements with attrName, then must be pointing to
// attrName with suffix==none
bool found = (i != bucket.end() && i->name == attrName);
return found ? (i->tfType) : (pxr::TfType());
#else
// Until we fix the order of the fields in the tuple to make equal attrNames contiguous, do a linear search
auto i = bucket.begin();
while (i != bucket.end() && !(i->name == attrName && i->suffix == NameSuffix::none))
i++;
bool found = (i != bucket.end());
return make_pair(found, i);
#endif
}
inline TypeC PathToAttributesMap::getType(const Bucket& bucket, const TokenC& attrName) const
{
APILOGGER("getType", apiLogEnabled, attrName);
std::vector<AttrNameAndType>::const_iterator pAttrNameAndType;
bool found;
std::tie(found, pAttrNameAndType) = findAttrNameAndType(bucket, attrName);
return found ? TypeC(pAttrNameAndType->type) : TypeC();
}
inline void PathToAttributesMap::addPath(const PathC& path, const Bucket& destBucket)
{
std::pair<BucketId, ArrayIndex> *pathAndBucketElem;
if (pathToBucketElem.allocateEntry(path, &pathAndBucketElem))
{
auto bucketIdAndImpl = findOrCreateBucket(destBucket);
BucketId bucketId = bucketIdAndImpl.first;
BucketImpl& bucketImpl = bucketIdAndImpl.second;
bucketImpl.SetBucket(destBucket);
size_t endElement = allocElement(bucketImpl);
*pathAndBucketElem = std::make_pair(bucketId, endElement);
bucketImpl.elemToPath[endElement] = { toSdfPath(path) };
}
else
{
auto iter = attrNameSetToBucketId.find(destBucket);
bool destBucketExists = (iter != attrNameSetToBucketId.end());
BucketId destBucketId = destBucketExists ? iter->second : kInvalidBucketId;
BucketId currentBucketId = pathAndBucketElem->first;
bool destBucketSpecified = (destBucket.size() != 0);
if (!destBucketSpecified || (destBucketExists && destBucketId == currentBucketId))
{
// If the dest bucket is not specified, or if already in the right
// bucket, then leave path in current bucket
return;
}
else if (destBucketSpecified && (destBucketId != currentBucketId))
{
moveElementBetweenBuckets(path, destBucketId, currentBucketId, destBucket);
}
}
}
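// Illustrative sketch of addPath semantics (hypothetical values): adding a
// path that is already present is a no-op unless a non-empty destBucket is
// given that differs from the path's current bucket, in which case the element
// is moved, as implemented above.
//
// p2a.addPath(path, Bucket());      // ensure path exists, keep current bucket
// p2a.addPath(path, wantedBucket);  // move path into wantedBucket if needed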
// renames a path in a bucket
inline void PathToAttributesMap::renamePath(const PathC& oldPath, const PathC& newPath)
{
// TODO: should this early exit if oldPath == newPath?
std::pair<BucketId, ArrayIndex> *oldPathAndBucketElem;
if (pathToBucketElem.find(oldPath, &oldPathAndBucketElem))
{
BucketImpl* bucketImplPtr = buckets.find(oldPathAndBucketElem->first);
bucketImplPtr->elemToPath[oldPathAndBucketElem->second] = toSdfPath(newPath);
std::pair<BucketId, ArrayIndex> *newPathAndBucketElem;
pathToBucketElem.allocateEntry(newPath, &newPathAndBucketElem);
*newPathAndBucketElem = std::move(*oldPathAndBucketElem);
pathToBucketElem.freeEntry(oldPath);
}
else
{
CARB_LOG_WARN_ONCE("PathToAttributesMap::renamePath(%s,%s) - cannot find bucket to rename\n",
Path(oldPath).getText(), Path(newPath).getText());
return;
}
}
// present - Whether this path has a bucket
// bucket - Id of the bucket if it does
// element - Index corresponding to path in this bucket's arrays
inline std::tuple<bool, BucketId, size_t> PathToAttributesMap::getPresentAndBucketAndElement(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketElem;
if (!pathToBucketElem.find(path, &bucketElem))
{
return { false, kInvalidBucketId, 0 };
}
return { true, bucketElem->first, bucketElem->second };
}
inline BucketId PathToAttributesMap::getBucketId(const PathC& path) const
{
std::tuple<bool, flatcache::BucketId, size_t> presentAndBucketAndElement = getPresentAndBucketAndElement(path);
bool present = std::get<0>(presentAndBucketAndElement);
if (!present)
return flatcache::kInvalidBucketId;
return std::get<1>(presentAndBucketAndElement);
}
inline SpanC PathToAttributesMap::getArrayElementPtr(SpanC array, size_t bucketElement) const
{
if (array.ptr == nullptr)
return { nullptr, 0, 0 };
size_t elemSize = array.elementSize;
return { array.ptr + bucketElement * elemSize, 1, elemSize };
}
inline ConstSpanC PathToAttributesMap::getArrayElementPtr(ConstSpanC array, size_t bucketElement) const
{
if (array.ptr == nullptr)
return { nullptr, 0, 0 };
size_t elemSize = array.elementSize;
return { array.ptr + bucketElement * elemSize, 1, elemSize };
}
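// getArrayElementPtr narrows a whole-bucket span to a single element: the
// returned span has elementCount 1 and points at ptr + bucketElement * elementSize.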
inline SpanC PathToAttributesMap::getAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeC", apiLogEnabled, attrName);
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
setArrayElementDirty(arrayAndchangedIndices, element);
SpanC array = arrayAndchangedIndices.array;
return getArrayElementPtr(array, element);
}
inline ConstSpanC PathToAttributesMap::getAttributeRdC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeRdC", apiLogEnabled, attrName);
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
const ConstSpanC array = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix).array;
// We don't set dirty indices here because this method gives read-only access
return getArrayElementPtr(array, element);
}
inline SpanC PathToAttributesMap::getAttributeWrC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeWrC", apiLogEnabled, path, attrName);
bool present;
BucketId bucketId;
size_t element;
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
// Writing an element is a read-modify-write on the whole array, so request read-if-valid/write CPU access
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadIfValidWriteConfig(), suffix);
setArrayElementDirty(arrayAndchangedIndices, element);
SpanC array = arrayAndchangedIndices.array;
return getArrayElementPtr(array, element);
}
inline SpanC PathToAttributesMap::setArrayAttributeSizeAndGet(PathC path, const TokenC& attrName, size_t newSize)
{
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t elementIndex; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, elementIndex) = getPresentAndBucketAndElement(path);
if (!present)
return { nullptr, 0, 0 };
return setArrayAttributeSizeAndGet(bucketId, elementIndex, attrName, newSize);
}
inline SpanC PathToAttributesMap::setArrayAttributeSizeAndGet(
BucketId bucketId, size_t elementIndex, const TokenC& attrName, size_t newSize)
{
APILOGGER("setArrayAttributeSizeAndGet", apiLogEnabled, attrName);
// TODO: remove double hash lookup here
ArrayAndDirtyIndices sizeArray;
{
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
sizeArray = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
}
else
{
sizeArray = {};
}
}
if (sizeArray.array.elementCount <= elementIndex)
return { nullptr, 0, 0 };
// Set the size
size_t* sizePtr = reinterpret_cast<size_t*>(getArrayElementPtr(sizeArray.array, elementIndex).ptr);
if (!sizePtr)
return { nullptr, 0, 0 };
*sizePtr = newSize;
// TODO: does this need to be moved higher next to getArraySpanC above?
setArrayElementDirty(sizeArray, elementIndex);
// Get the new array-valued element
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), NameSuffix::none);
setArrayElementDirty(arrayAndchangedIndices, elementIndex);
SpanC array = arrayAndchangedIndices.array;
uint8_t** arrayData = reinterpret_cast<uint8_t**>(getArrayElementPtr(array, elementIndex).ptr);
if (!arrayData)
return { nullptr, 0, 0 };
return { *arrayData, newSize, 0 };
}
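// Illustrative sketch of setArrayAttributeSizeAndGet (hypothetical names): it
// writes the new element count, dirties the element, and returns a span over
// the per-path array storage. Note that, as constructed above, the returned
// SpanC carries elementSize 0.
//
// SpanC arr = p2a.setArrayAttributeSizeAndGet(path, pointsToken, 3);
// // arr.ptr now addresses storage for 3 elements of the attribute's type,
// // assuming the path and attribute exist (otherwise arr.ptr is nullptr).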
template <typename T>
T* PathToAttributesMap::getAttribute(const PathC& path, const TokenC& attrName)
{
APILOGGER("getAttribute", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeC(path, attrName, NameSuffix::none).ptr);
}
template <typename T>
const T* PathToAttributesMap::getAttributeRd(const PathC& path, const TokenC& attrName)
{
APILOGGER("getAttributeRd", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<const T*>(getAttributeRdC(path, attrName, NameSuffix::none).ptr);
}
template <typename T>
T* PathToAttributesMap::getAttributeWr(const PathC& path, const TokenC& attrName)
{
APILOGGER("getAttributeWr", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeWrC(path, attrName, NameSuffix::none).ptr);
}
template <typename T>
T* PathToAttributesMap::getAttributeWr(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("getAttributeWr", apiLogEnabled, path, attrName);
// TODO: check that T is the correct type
return reinterpret_cast<T*>(getAttributeWrC(path, attrName, suffix).ptr);
}
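// Illustrative sketch of the typed accessors (hypothetical attribute name; the
// template parameter is not validated against the stored type, per the TODOs
// above, so the caller must pass the correct T):
//
// float* radius = p2a.getAttributeWr<float>(path, radiusToken);
// if (radius)
//     *radius = 2.0f;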
inline ValidMirrors PathToAttributesMap::getAttributeValidBits(const PathC& path,
const TokenC& attrName,
ArrayAttributeArray::MirroredArrays subArray) const
{
APILOGGER("getAttributeValidBits", apiLogEnabled, path, attrName);
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
return ValidMirrors::eNone;
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return ValidMirrors::eNone;
const MirroredArray *array = nullptr;
const ScalarAttributeArray *scalarAttributeArray;
const AttrName name{ attrName, NameSuffix::none };
if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
array = scalarAttributeArray;
}
else
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
// This if statement is only needed because we ported OM-70434 fix from 105 to 104.2
// In 105, these mirrored arrays are stored in an array, indexed by subarray
if (subArray == ArrayAttributeArray::MirroredArrays::Values)
{
array = &arrayAttributeArray->values;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::ElemCounts)
{
array = &arrayAttributeArray->elemCounts;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::CpuElemCounts)
{
array = &arrayAttributeArray->cpuElemCounts;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::GpuElemCounts)
{
array = &arrayAttributeArray->gpuElemCounts;
}
else if (subArray == ArrayAttributeArray::MirroredArrays::GpuPtrs)
{
array = &arrayAttributeArray->gpuPtrs;
}
}
else
{
return ValidMirrors::eNone;
}
}
if (!array)
return ValidMirrors::eNone;
const size_t elemSize = array->typeinfo.size;
const bool isTag = (elemSize == 0);
if (isTag)
return ValidMirrors::eNone;
ValidMirrors retval = ValidMirrors::eNone;
if (array->cpuValid)
retval = retval | ValidMirrors::eCPU;
if (array->gpuValid && array->gpuAllocedWithCuda)
retval = retval | ValidMirrors::eCudaGPU;
if (array->gpuValid && !array->gpuAllocedWithCuda)
retval = retval | ValidMirrors::eGfxGPU;
return retval;
}
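// Interpreting the result (a summary of the logic above, not new behavior):
// eCPU means the CPU mirror is valid; eCudaGPU and eGfxGPU distinguish a valid
// GPU mirror by whether it was allocated with CUDA. A tag attribute (element
// size 0) always reports eNone.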
inline bool PathToAttributesMap::findArrayAttributeArrayForPath(const PathC& path, const TokenC& attrName, size_t& outElementIndex, BucketImpl*& outBucketImpl, ArrayAttributeArray*& outArrayAttributeArray)
{
BucketId bucketId;
bool found;
std::tie(found, bucketId, outElementIndex) = getPresentAndBucketAndElement(path);
if (found)
{
outBucketImpl = buckets.find(bucketId);
if (outBucketImpl)
{
ArrayAttributeArray* array;
if (outBucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &array))
{
outArrayAttributeArray = array;
return true;
}
}
}
CARB_LOG_WARN_ONCE("Warning: %s not found\n", toTfToken(attrName).GetText());
return false;
}
inline bool PathToAttributesMap::findArrayAttributeArrayForBucketId(const BucketId bucketId, const TokenC& attrName, BucketImpl*& outBucketImpl, ArrayAttributeArray*& outArrayAttributeArray)
{
if (bucketId != kInvalidBucketId)
{
outBucketImpl = buckets.find(bucketId);
if (outBucketImpl)
{
ArrayAttributeArray* array;
if (outBucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &array))
{
outArrayAttributeArray = array;
return true;
}
}
}
CARB_LOG_WARN_ONCE("Warning: %s not found\n", toTfToken(attrName).GetText());
return false;
}
inline size_t* PathToAttributesMap::getArrayAttributeSize(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSize", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayElementDirty(arrayAndchangedIndices, element);
const SpanC array = arrayAndchangedIndices.array;
return reinterpret_cast< size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline const size_t* PathToAttributesMap::getArrayAttributeSizeRd(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizeRd", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<const size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline size_t* PathToAttributesMap::getArrayAttributeSizeWr(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizeWr", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadIfValidWriteConfig());
setArrayElementDirty(arrayAndchangedIndices, element);
const SpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline const size_t* PathToAttributesMap::getArrayAttributeSizeRdGpu(const PathC& path, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizeRdGpu", apiLogEnabled, path, attrName);
size_t element;
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CudaReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<const size_t*>(getArrayElementPtr(array, element).ptr);
}
return nullptr;
}
inline const size_t* PathToAttributesMap::getArrayAttributeSizesRdGpu(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRdGpu", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
const BucketId bucketId = findBucketId(bucket);
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CudaReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<const size_t*>(array.ptr);
}
return nullptr;
}
inline ConstSpanSizeC PathToAttributesMap::getArrayAttributeSizesRdGpu(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRdGpu", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CudaReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
return { reinterpret_cast<const size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline size_t* PathToAttributesMap::getArrayAttributeSizes(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizes", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
const BucketId bucketId = findBucketId(bucket);
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndchangedIndices);
const SpanC array = arrayAndchangedIndices.array;
return reinterpret_cast<size_t*>(array.ptr);
}
return nullptr;
}
inline SpanSizeC PathToAttributesMap::getArrayAttributeSizes(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizes", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndchangedIndices);
const SpanC array = arrayAndchangedIndices.array;
CARB_ASSERT(array.elementSize == sizeof(size_t));
return { reinterpret_cast<size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline ConstSpanSizeC PathToAttributesMap::getArrayAttributeSizesRd(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRd", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
const ConstSpanC array = arrayAndchangedIndices.array;
CARB_ASSERT(array.elementSize == sizeof(size_t));
return { reinterpret_cast<const size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline SpanSizeC PathToAttributesMap::getArrayAttributeSizesWr(BucketId bucketId, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesWr", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndchangedIndices);
const SpanC array = arrayAndchangedIndices.array;
CARB_ASSERT(array.elementSize == sizeof(size_t));
return { reinterpret_cast<size_t*>(array.ptr), array.elementCount };
}
return { nullptr, 0 };
}
inline ArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizes(BucketId bucketId,
const TokenC& attrName)
{
APILOGGER("getArrayAttributeArrayWithSizes", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
SpanC spanOfPointers;
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
spanOfPointers = arrayAndDirtyIndices.array;
}
ConstSpanC spanOfSizes;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfSizes = arrayAndDirtyIndices.array;
}
CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr), reinterpret_cast<const size_t*>(spanOfSizes.ptr),
spanOfPointers.elementCount };
}
return ArrayPointersAndSizesC{ nullptr, nullptr, 0 };
}
inline ConstArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizesRd(BucketId bucketId,
const TokenC& attrName)
{
APILOGGER("getArrayAttributeArrayWithSizesRd", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ConstSpanC spanOfPointers;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfPointers = arrayAndDirtyIndices.array;
}
ConstSpanC spanOfSizes;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfSizes = arrayAndDirtyIndices.array;
}
CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr), reinterpret_cast<const size_t*>(spanOfSizes.ptr),
spanOfPointers.elementCount };
}
return ConstArrayPointersAndSizesC{ nullptr, nullptr, 0 };
}
inline ArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizesWr(BucketId bucketId,
const TokenC& attrName)
{
APILOGGER("getArrayAttributeArrayWithSizesWr", apiLogEnabled, attrName);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
SpanC spanOfPointers;
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
spanOfPointers = arrayAndDirtyIndices.array;
}
ConstSpanC spanOfSizes;
{
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
spanOfSizes = arrayAndDirtyIndices.array;
}
CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr), reinterpret_cast<const size_t*>(spanOfSizes.ptr),
spanOfPointers.elementCount };
}
return ArrayPointersAndSizesC{ nullptr, nullptr, 0 };
}
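// Illustrative use of the pointers+sizes views (hypothetical token; the member
// names in the sketch are assumptions — the construction above shows the struct
// packs a pointer array, a size array, and an element count): the i-th entry
// pairs a pointer to the i-th path's array data with its element count, so a
// caller can walk an array-of-array attribute bucket-wide.
//
// ArrayPointersAndSizesC v = p2a.getArrayAttributeArrayWithSizes(bucketId, pointsToken);
// for (size_t i = 0; i != v.elementCount; i++)
// {
//     uint8_t* data = v.ptrs[i]; // bytes of path i's array (member name assumed)
//     size_t n = v.sizes[i];     // element count of path i's array (member name assumed)
// }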
inline const size_t* PathToAttributesMap::getArrayAttributeSizesRd(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesRd", apiLogEnabled, attrName);
const BucketId bucketId = findBucketId(bucket);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuReadConfig());
return reinterpret_cast<const size_t*>(arrayAndDirtyIndices.array.ptr);
}
return nullptr;
}
inline size_t* PathToAttributesMap::getArrayAttributeSizesWr(const Bucket& bucket, const TokenC& attrName)
{
APILOGGER("getArrayAttributeSizesWr", apiLogEnabled, attrName);
const BucketId bucketId = findBucketId(bucket);
BucketImpl* bucketImpl;
ArrayAttributeArray *arrayAttributeArray;
if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
{
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndDirtyIndices);
return reinterpret_cast<size_t*>(arrayAndDirtyIndices.array.ptr);
}
return nullptr;
}
// Intersect set<AttrNameAndType> and set<AttrNameAndType> comparing type,
// name and suffix, and ignoring tfType
inline void set_intersection2(set<AttrNameAndType>::const_iterator first1,
set<AttrNameAndType>::const_iterator last1,
set<AttrNameAndType>::const_iterator first2,
set<AttrNameAndType>::const_iterator last2,
std::back_insert_iterator<std::vector<AttrNameAndType>> d_first)
{
// Note that in the name comparisons below TokenC masks off USD's lifetime bit.
// For example, tokens created from the same string are considered equal even
// if one was created with finite lifetime and the other infinite lifetime.
auto comp1 = [](const AttrNameAndType& a, const AttrNameAndType& b) {
if (TypeC(a.type) < TypeC(b.type))
return true;
if (TypeC(b.type) < TypeC(a.type))
return false;
if (TokenC(a.name) < TokenC(b.name))
return true;
if (TokenC(b.name) < TokenC(a.name))
return false;
return a.suffix < b.suffix;
};
auto comp2 = [](const AttrNameAndType& a, const AttrNameAndType& b) {
if (TypeC(a.type) < TypeC(b.type))
return true;
if (TypeC(b.type) < TypeC(a.type))
return false;
if (TokenC(a.name) < TokenC(b.name))
return true;
if (TokenC(b.name) < TokenC(a.name))
return false;
return a.suffix < b.suffix;
};
while (first1 != last1 && first2 != last2)
{
if (comp1(*first1, *first2))
{
++first1;
}
else
{
if (!comp2(*first2, *first1))
{
*d_first++ = *first1++;
}
++first2;
}
}
}
inline flatcache::set<BucketId> PathToAttributesMap::findBuckets(const set<AttrNameAndType>& all,
const set<AttrNameAndType>& any,
const set<AttrNameAndType>& none) const
{
flatcache::set<BucketId> retval;
retval.reserve(256);
// TODO: Do this in a less brute-force way
for (auto& bucketAndId : attrNameSetToBucketId)
{
const Bucket& bucketTypes = bucketAndId.first;
BucketId bucketId = bucketAndId.second;
bool bucketEmpty = getElementCount(bucketId) == 0;
if (bucketEmpty)
continue;
std::vector<AttrNameAndType> allTypesPresent;
std::vector<AttrNameAndType> anyTypesPresent;
std::vector<AttrNameAndType> noneTypesPresent;
set_intersection2(
all.begin(), all.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(allTypesPresent));
set_intersection2(
any.begin(), any.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(anyTypesPresent));
set_intersection2(
none.begin(), none.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(noneTypesPresent));
bool allOfAllTypesPresent = (allTypesPresent.size() == all.size());
bool oneOfAnyTypesPresent = (any.size() == 0) || (anyTypesPresent.size() != 0);
bool noneOfNoneTypesPresent = (noneTypesPresent.size() == 0);
if (allOfAllTypesPresent && oneOfAnyTypesPresent && noneOfNoneTypesPresent)
{
retval.v.push_back(bucketId);
}
}
// Sort the vector to make it a flatcache::set
std::sort(retval.begin(), retval.end());
return retval;
}
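// Illustrative sketch of a findBuckets query (hypothetical attribute sets):
// non-empty buckets are returned whose attribute set contains every entry of
// `all`, at least one entry of `any` (or `any` is empty), and no entry of
// `none`.
//
// set<AttrNameAndType> all = ...;  // e.g. a single required attribute
// set<AttrNameAndType> any, none;  // empty: no further constraint
// flatcache::set<BucketId> hits = p2a.findBuckets(all, any, none);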
inline std::tuple<BucketId, ArrayIndex> PathToAttributesMap::getBucketAndArrayIndex(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (pathToBucketElem.find(path, &bucketAndElem))
{
const BucketId& bucketId = bucketAndElem->first;
const ArrayIndex& arrayIndex = bucketAndElem->second;
return { bucketId, arrayIndex };
}
else
{
// Commenting out the error in 104.2 as there is no hasPrim API in 104.2 yet
// and FabricSD needs to check for existence of a prim without causing an error to be logged
// CARB_LOG_ERROR_ONCE("getBucketAndArrayIndex called on non-existent path '%s'\n", Path(path).getText());
return { kInvalidBucketId, kInvalidArrayIndex };
}
}
inline Bucket PathToAttributesMap::getTypes(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (pathToBucketElem.find(path, &bucketAndElem))
{
const BucketImpl* bucketImpl = buckets.find(bucketAndElem->first);
if (bucketImpl)
{
return bucketImpl->GetBucket();
}
}
CARB_LOG_WARN_ONCE("getTypes called on non-existent path %s\n", Path(path).getText());
return Bucket();
}
inline size_t PathToAttributesMap::getAttributeCount(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (pathToBucketElem.find(path, &bucketAndElem))
{
const BucketImpl* bucketImpl = buckets.find(bucketAndElem->first);
if (bucketImpl)
{
return bucketImpl->GetBucket().size();
}
}
CARB_LOG_ERROR_ONCE("getAttributeCount called on non-existent path %s\n", Path(path).getText());
return 0;
}
inline TypeC PathToAttributesMap::getType(const PathC& path, const TokenC& attrName) const
{
APILOGGER("getType", apiLogEnabled, path, attrName);
const std::pair<BucketId, ArrayIndex>* bucketAndElem;
if (!pathToBucketElem.find(path, &bucketAndElem))
{
CARB_LOG_WARN_ONCE("getTfType called on non-existent path %s\n", Path(path).getText());
return kUnknownType;
}
const BucketId &bucketId = bucketAndElem->first;
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
{
CARB_LOG_WARN_ONCE(
"getType called on path %s whose bucket is missing; pathToBucketElem is stale\n", Path(path).getText());
return kUnknownType;
}
const AttrName name{ attrName, NameSuffix::none };
const ScalarAttributeArray *scalarAttributeArray;
if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
return scalarAttributeArray->type;
}
else
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
return arrayAttributeArray->values.type;
}
}
CARB_LOG_WARN_ONCE("getType called on non-existent attribute %s %s\n", Path(path).getText(), Token(attrName).getText());
return kUnknownType;
}
// Return 1 if attribute is present at path, 0 otherwise
inline size_t PathToAttributesMap::count(const PathC& path, const TokenC& attrName) const
{
bool present; // Whether this path has a bucket
BucketId bucketId; // Id of the bucket if it does
size_t element; // Index corresponding to path in this bucket's arrays
std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
if (!present)
{
return 0;
}
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return 0;
const AttrName name{ attrName, NameSuffix::none };
const ScalarAttributeArray *scalarAttributeArray;
if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
{
return 1;
}
else
{
const ArrayAttributeArray *arrayAttributeArray;
if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
{
return 1;
}
}
return 0;
}
inline void PathToAttributesMap::moveElementScalarData(ScalarAttributeArray &destArray, const size_t destElemIndex, const ScalarAttributeArray &srcArray, const size_t srcElemIndex)
{
if (srcArray.type != destArray.type)
{
return;
}
const Typeinfo& typeinfo = srcArray.typeinfo;
const size_t size = typeinfo.size;
const bool isArray = typeinfo.isArray;
// Ideally usdValid would be driven by the change tracker; however, we can't
// use that until we have a way to tie into it. Ideally there would be a
// subscriber for "copying back to USD" that could be queried to see whether
// an element is invalid. At this point we just have to do it per-attribute,
// since that is as fine-grained as the data we have right now. This happens
// in moveElement to avoid having to repeat the attribute-array matching
// logic that the function already does.
if (!srcArray.usdValid)
{
destArray.usdValid = srcArray.usdValid;
}
//
// In the case where this is the first element in the new bucket
// then the validity of the data needs to be moved from the old
// bucket.
//
if (destElemIndex == 0)
{
destArray.cpuValid = srcArray.cpuValid;
destArray.gpuValid = srcArray.gpuValid;
}
if (destArray.cpuValid && !srcArray.cpuValid)
{
// This should not happen because of makeSrcValidIfDestValid
CARB_LOG_ERROR_ONCE("Invalid state while moving element: srcArray.cpuValid=%i destArray.cpuValid=%i",
srcArray.cpuValid, destArray.cpuValid);
assert(false);
}
if (destArray.gpuValid && !srcArray.gpuValid)
{
// This should not happen because of makeSrcValidIfDestValid
CARB_LOG_ERROR_ONCE("Invalid state while moving element: srcArray.gpuValid=%i destArray.gpuValid=%i",
srcArray.gpuValid, destArray.gpuValid);
assert(false);
}
//
// As noted above, the validity of the src was already matched to the
// destination's needs. In the case where, for example, the src is valid but
// the dest is not, that is OK; we still move the data because, for
// array-of-array attributes, this at least avoids another allocation.
//
if (srcArray.cpuValid || isArray)
{
uint8_t* destArrayData = destArray.cpuData();
uint8_t* destPtr = destArrayData + destElemIndex * size;
const uint8_t* srcArrayData = srcArray.cpuData();
const uint8_t* srcPtr = srcArrayData + srcElemIndex * size;
memcpy(destPtr, srcPtr, size);
}
if (srcArray.gpuValid)
{
if (isArray)
{
if (srcArray.gpuCapacity)
{
destArray.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, srcArray.gpuCapacity, typeinfo.size);
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, destArray.gpuArray, srcArray.gpuArray, srcArray.gpuCapacity,
omni::gpucompute::MemcpyKind::deviceToDevice);
}
}
else
{
uint8_t* destArrayData = destArray.gpuArray;
uint8_t* destPtr = destArrayData + destElemIndex * size;
const uint8_t* srcArrayData = srcArray.gpuArray;
const uint8_t* srcPtr = srcArrayData + srcElemIndex * size;
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, destPtr, srcPtr, size, omni::gpucompute::MemcpyKind::deviceToDevice);
}
destArray.gpuAllocedWithCuda = true;
}
}
inline void PathToAttributesMap::moveElementArrayData(ArrayAttributeArray &destArray, const size_t destElemIndex, const ArrayAttributeArray &srcArray, const size_t srcElemIndex)
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
moveElementScalarData(destArray.values, destElemIndex, srcArray.values, srcElemIndex);
moveElementScalarData(destArray.elemCounts, destElemIndex, srcArray.elemCounts, srcElemIndex);
moveElementScalarData(destArray.cpuElemCounts, destElemIndex, srcArray.cpuElemCounts, srcElemIndex);
moveElementScalarData(destArray.gpuElemCounts, destElemIndex, srcArray.gpuElemCounts, srcElemIndex);
moveElementScalarData(destArray.gpuPtrs, destElemIndex, srcArray.gpuPtrs, srcElemIndex);
}
inline void PathToAttributesMap::moveElement(BucketImpl& destBucket,
size_t destElemIndex,
BucketImpl& srcBucket,
size_t srcElemIndex)
{
srcBucket.scalarAttributeArrays.forEach([this, &destBucket, &destElemIndex, &srcElemIndex](const AttrName& name, const ScalarAttributeArray& srcArray) {
// If bucket move is due to removal then one destTupleIndex will be
// invalid. So we check for invalid here
ScalarAttributeArray *destArray;
if (destBucket.scalarAttributeArrays.find(name, &destArray))
{
moveElementScalarData(*destArray, destElemIndex, srcArray, srcElemIndex);
}
});
srcBucket.arrayAttributeArrays.forEach([this, &destBucket, &destElemIndex, &srcElemIndex](const AttrName& name, const ArrayAttributeArray& srcArray) {
// If bucket move is due to removal then one destTupleIndex will be
// invalid. So we check for invalid here
ArrayAttributeArray *destArray;
if (destBucket.arrayAttributeArrays.find(name, &destArray))
{
moveElementArrayData(*destArray, destElemIndex, srcArray, srcElemIndex);
}
});
destBucket.elemToPath[destElemIndex] = std::move(srcBucket.elemToPath[srcElemIndex]);
}
inline void PathToAttributesMap::destroyElement(BucketId bucketId, size_t elemIndex, bool destroyDataPointedTo)
{
BucketImpl* srcBucketImplPtr = buckets.find(bucketId);
if (!srcBucketImplPtr)
return; // nothing to delete
BucketImpl& srcBucketImpl = *srcBucketImplPtr;
size_t elemCount = PathToAttributesMap::getElementCount(srcBucketImpl);
if (elemCount == 0)
return; // nothing to delete
if (destroyDataPointedTo)
{
// Destruct element about to be overwritten
srcBucketImpl.arrayAttributeArrays.forEach([this, &elemIndex](const AttrName& name, ArrayAttributeArray& array) {
// If a CPU array has been allocated, delete it
uint8_t** arrayCpuPtrArray = reinterpret_cast<uint8_t**>(array.values.cpuData());
uint8_t*& cpuPtrToDelete = arrayCpuPtrArray[elemIndex];
if (cpuPtrToDelete)
{
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
free(cpuPtrToDelete);
}
else
{
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuPtrToDelete);
}
cpuPtrToDelete = nullptr;
}
// If a GPU array has been allocated, delete it
uint8_t** arrayGpuPtrArray = reinterpret_cast<uint8_t**>(array.gpuPtrs.cpuData());
uint8_t*& gpuPtrToDelete = arrayGpuPtrArray[elemIndex];
if (gpuPtrToDelete)
{
platform.gpuCuda->free(*platform.gpuCudaCtx, gpuPtrToDelete);
gpuPtrToDelete = nullptr;
}
size_t* arrayCpuCapacityArray = reinterpret_cast<size_t*>(array.cpuElemCounts.cpuData());
arrayCpuCapacityArray[elemIndex] = 0;
size_t* arrayGpuCapacityArray = reinterpret_cast<size_t*>(array.gpuElemCounts.cpuData());
arrayGpuCapacityArray[elemIndex] = 0;
size_t* arraySizeArray = reinterpret_cast<size_t*>(array.elemCounts.cpuData());
arraySizeArray[elemIndex] = 0;
});
}
// Copy last element to element to be deleted
PathC movedElemPath;
size_t lastElemIndex = elemCount - 1;
bool deletingLastElement = (elemIndex == lastElemIndex);
// If bucket has more than one element, move last element to deleted element
if (!deletingLastElement)
{
moveElement(srcBucketImpl, elemIndex, srcBucketImpl, lastElemIndex);
movedElemPath = asInt(srcBucketImpl.elemToPath[elemIndex]);
// For all attributes, dirty[elemIndex] := dirty[lastElemIndex]
srcBucketImpl.listenerIdToChanges.forEach([&elemIndex, &lastElemIndex, &elemCount](const ListenerId& listenerId, Changes& changes) {
size_t trackedAttrCount = changes.changedAttributes.size();
for (size_t i = 0; i != trackedAttrCount; i++)
{
ChangedIndicesImpl& changedIndices = changes.changedIndices[i];
if (changedIndices.contains(lastElemIndex) && !changedIndices.contains(elemIndex))
{
changedIndices.insert(elemIndex, elemCount - 1);
}
else if (!changedIndices.contains(lastElemIndex) && changedIndices.contains(elemIndex))
{
changedIndices.erase(elemIndex, elemCount - 1);
}
}
});
}
// Remove last element from change tracker
srcBucketImpl.listenerIdToChanges.forEach([&elemCount](const ListenerId& listenerId, Changes& changes) {
size_t trackedAttrCount = changes.changedAttributes.size();
for (size_t i = 0; i != trackedAttrCount; i++)
{
ChangedIndicesImpl& changedIndices = changes.changedIndices[i];
changedIndices.decrementN(elemCount - 1);
}
});
{
const auto removeLastElementFromMirroredArray = [this](MirroredArray& array) {
if (array.count > 0)
{
const size_t newSize = array.size() - array.typeinfo.size;
array.count--;
array.resize(newSize);
}
if (array.gpuCapacity != 0)
{
array.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, array.size(), array.typeinfo.size);
}
};
// Reduce element count
srcBucketImpl.scalarAttributeArrays.forEach([this, &removeLastElementFromMirroredArray](const AttrName& name, ScalarAttributeArray& array) {
removeLastElementFromMirroredArray(array);
});
srcBucketImpl.arrayAttributeArrays.forEach([this, &removeLastElementFromMirroredArray](const AttrName& name, ArrayAttributeArray& array) {
removeLastElementFromMirroredArray(array.values);
removeLastElementFromMirroredArray(array.elemCounts);
removeLastElementFromMirroredArray(array.cpuElemCounts);
removeLastElementFromMirroredArray(array.gpuElemCounts);
removeLastElementFromMirroredArray(array.gpuPtrs);
});
}
srcBucketImpl.elemToPath.pop_back();
// If bucket has more than one element, remap path that pointed to last element
if (!deletingLastElement)
{
std::pair<BucketId, ArrayIndex>* movedBucketAndElemIndex;
if (pathToBucketElem.find(movedElemPath, &movedBucketAndElemIndex))
{
movedBucketAndElemIndex->second = elemIndex;
}
else
{
CARB_LOG_ERROR_ONCE("destroyElement attempted to re-index missing path %s\n", Path(movedElemPath).getText());
}
}
// Update change trackers
// We allocate them lazily, so we have to iterate over listenerIdToChangeTrackerConfig
// then allocate bucketImpl.listenerIdToChanges if necessary
auto bucketImpl = buckets.find(bucketId);
if (bucketImpl)
{
listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &elemIndex, &lastElemIndex, &deletingLastElement](ListenerId& listenerId, ChangeTrackerConfig& config) {
if (config.changeTrackingEnabled)
{
// Allocate changes if necessary
Changes* changes;
if (bucketImpl->listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes();
}
//
// Since we may be moving within a bucket we need to inform the change tracker
//
if (!deletingLastElement)
{
if (changes->addedIndices.contains(lastElemIndex))
{
// only need to track that it moved if we already
// cared about it
changes->addNewPrim(elemIndex);
}
changes->removePrim(lastElemIndex);
}
else
{
changes->removePrim(elemIndex);
}
}
});
}
}
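// destroyElement uses swap-and-pop: the bucket's last element is moved into
// the freed slot (moveElement above), pathToBucketElem is re-pointed for the
// moved path, and per-listener changed-index sets are remapped accordingly, so
// element indices within a bucket are not stable across removals.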
inline void PathToAttributesMap::moveElementBetweenBuckets(const PathC& path,
BucketId destBucketId,
BucketId srcBucketId,
const Bucket& destBucket)
{
if (destBucketId == srcBucketId)
return;
// Get source BucketImpl
BucketImpl* srcPtr = buckets.find(srcBucketId);
if (!srcPtr)
{
CARB_LOG_ERROR("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find source bucket\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
return;
}
// Get bucket and elem index
std::pair<BucketId, ArrayIndex>* srcBucketAndElemIndex;
if (!pathToBucketElem.find(path, &srcBucketAndElemIndex))
{
CARB_LOG_ERROR_ONCE("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find path\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
return;
}
// Get dest BucketImpl
BucketImpl* destPtr = buckets.find(destBucketId);
if (!destPtr)
{
CARB_LOG_ERROR("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find destination bucket\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
return;
}
size_t destElemIndex = PathToAttributesMap::getElementCount(*destPtr);
// Allocate element in new bucket
allocElementForMove(*destPtr, *srcPtr, path);
// Copy values from src to dest
//
// Ideally usdValid would be driven by the change tracker; however, we can't
// use that until we have a way to tie into it. Ideally there would be a
// subscriber for "copying back to USD" that could be queried to see whether
// an element is invalid. At this point we just have to do it per-attribute,
// since that is as fine-grained as the data we have right now. This happens
// in moveElement to avoid having to repeat the attribute-array matching
// logic that the function already does.
moveElement(*destPtr, destElemIndex, *srcPtr, srcBucketAndElemIndex->second);
// Delete element in old bucket
// Don't destroy data pointed to, because we want dest element to point to it
const bool destroyDataPointedTo = false;
destroyElement(srcBucketId, srcBucketAndElemIndex->second, destroyDataPointedTo);
// Map path to new bucket
*srcBucketAndElemIndex = std::make_pair(destBucketId, destElemIndex);
// Convert destBucket to a set<AttrNameAndType>
set<AttrNameAndType> destBucket_v2;
destBucket_v2.v.resize(destBucket.size());
for (size_t i = 0; i != destBucket.size(); i++)
{
destBucket_v2.v[i] =
AttrNameAndType(Type(destBucket.v[i].type), Token(destBucket.v[i].name), destBucket.v[i].suffix);
}
// Copy dirty bits to new bucket
srcPtr->listenerIdToChanges.forEach([&destPtr, &destBucket_v2, &destElemIndex](const ListenerId &listener, const Changes& srcChanges) {
// Create if listenerId doesn't exist on dest bucket
Changes* destChanges;
if (destPtr->listenerIdToChanges.allocateEntry(listener, &destChanges))
{
new (destChanges) Changes();
}
size_t changedAttrCount = srcChanges.changedAttributes.size();
size_t destNewElemCount = destPtr->elemToPath.size();
for (size_t i = 0; i != changedAttrCount; i++)
{
const AttrNameAndType& nameAndType = srcChanges.changedAttributes.v[i];
// TODO: we could optimize this by taking advantage of destBucket_v2
// and changeAttributes being sorted. This would allow us to iterate
// through both at the same time, and avoid doing n O(log n) lookups.
if (destBucket_v2.contains(nameAndType))
{
destChanges->setDirty(nameAndType, destElemIndex, destNewElemCount);
}
}
});
}
inline void PathToAttributesMap::addAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value)
{
addAttributeC(path, attrName, NameSuffix::none, type, value);
}
inline void PathToAttributesMap::addArrayAttributeC(
const PathC& path, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount)
{
addArrayAttributeC(path, attrName, NameSuffix::none, type, value, arrayElemCount);
}
/**
 * @brief Adds multiple attributes to a primitive.
 *
 * @details As opposed to addAttributeC, this function allows the user to add multiple attributes to
 * a primitive at the same time and only does re-bucketing once after adding all of them.
 * This should be faster than adding them one-by-one and re-bucketing after each of them.
 *
 * @param path - primitive path
 * @param attrNames - vector of the attribute names as tokens
 * @param typeCs - vector of type identifiers, one per attribute name
 */
inline void PathToAttributesMap::addAttributesToPrim(const PathC& path,
const std::vector<TokenC>& attrNames,
const std::vector<TypeC>& typeCs)
{
CARB_ASSERT(attrNames.size() == typeCs.size());
addAttributesToBucket(path, attrNames, typeCs);
}
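// Illustrative sketch of addAttributesToPrim (hypothetical tokens/types; the
// TypeC values are assumed to describe the intended attribute types):
//
// std::vector<TokenC> names = { radiusToken, colorToken };
// std::vector<TypeC> types = { floatTypeC, float3TypeC };
// p2a.addAttributesToPrim(path, names, types); // one re-bucketing for both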
// Find bucketId the path is currently in
// Find the bucket from the bucketId
inline void PathToAttributesMap::addAttributesToBucket(const PathC& path,
const std::vector<TokenC>& attrNames,
const std::vector<TypeC>& typeCs)
{
NameSuffix suffix = NameSuffix::none;
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
bool pathIsInFlatcache = (srcBucketId != kInvalidBucketId);
if (!pathIsInFlatcache)
{
addPath(path);
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
}
// Get dest bucket
// Dest bucket types = union(source bucket types, new type)
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
Bucket destBucket = bucketImplPtr->GetBucket();
for (size_t c = 0; c < attrNames.size(); ++c)
{
Token attrName(attrNames[c]);
AttrNameAndType nameAndType(Type(typeCs[c]), attrName, NameSuffix::none);
// Early out if attribute already present
if (destBucket.find(nameAndType) != destBucket.end())
continue;
// When adding a new attribute that shadows the name of an existing
// attribute, but with a new type, we choose to drop the old attribute
// on the floor.
// Unfortunately, since we are searching by name and do not know the old
// type, we have to scan the list of attributes.
for (const AttrNameAndType& bucketNameAndType : destBucket)
{
if (bucketNameAndType.name == attrNames[c] && bucketNameAndType.suffix == suffix &&
TypeC(bucketNameAndType.type) != typeCs[c])
{
// we can stop here since this enforces uniqueness of attribute names
// todo: check that USD already enforces this
destBucket.erase(bucketNameAndType);
break;
}
}
destBucket.insert(nameAndType);
}
BucketId destBucketId = addBucket(destBucket);
if (srcBucketId != destBucketId)
{
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
}
// Find bucketId the path is currently in
// Find the bucket from the bucketId
inline std::tuple<BucketId, ArrayIndex> PathToAttributesMap::addAttributeGetBucketAndArrayIndex(
const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type)
{
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
bool pathIsInFlatcache = (srcBucketId != kInvalidBucketId);
if (!pathIsInFlatcache)
{
addPath(path);
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
}
// Get dest bucket
// Dest bucket types = union(source bucket types, new type)
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
Bucket destBucket = bucketImplPtr->GetBucket();
Token attrName(attrNameC);
AttrNameAndType nameAndType(Type(type), attrName, nameSuffix);
// Early out if attribute already present
if (destBucket.find(nameAndType) != destBucket.end())
return { srcBucketId, srcElemIndex };
// When adding a new attribute that shadows the name of an existing
// attribute, but with a new type, we choose to drop the old attribute
// on the floor.
// Unfortunately, since we are searching by name and do not know the old
// type, we have to scan the list of attributes.
for (const AttrNameAndType& bucketNameAndType : destBucket)
{
if (bucketNameAndType.name == attrNameC && bucketNameAndType.suffix == nameSuffix &&
TypeC(bucketNameAndType.type) != type)
{
            // we can stop here since this enforces uniqueness of attribute names
// todo: check that USD already enforces this
destBucket.erase(bucketNameAndType);
break;
}
}
destBucket.insert(nameAndType);
BucketId destBucketId = addBucket(destBucket);
size_t destElemIndex;
if (srcBucketId != destBucketId)
{
destElemIndex = getElementCount(destBucket);
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
else
{
destElemIndex = srcElemIndex;
}
return { destBucketId, destElemIndex };
}
inline void PathToAttributesMap::addAttributeC(
const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type, const void* value)
{
const Typeinfo& typeinfo = getTypeInfo(type);
if (typeinfo.isArray && value)
{
CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
return;
}
addAttributeInternal(path, attrNameC, nameSuffix, type, value, typeinfo, 0);
}
inline void PathToAttributesMap::addArrayAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const void* value, const size_t arrayElemCount)
{
const Typeinfo& typeinfo = getTypeInfo(type);
addAttributeInternal(path, attrName, suffix, type, value, typeinfo, arrayElemCount);
}
inline SpanC PathToAttributesMap::getOrCreateAttributeWrC(const PathC& path,
const TokenC& attrName,
TypeC type)
{
APILOGGER("getOrCreateAttributeWrC", apiLogEnabled, path, attrName);
BucketId bucketId;
ArrayIndex elemIndex;
std::tie(bucketId, elemIndex) = addAttributeGetBucketAndArrayIndex(path, attrName, NameSuffix::none, type);
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), NameSuffix::none);
setArrayElementDirty(arrayAndchangedIndices, elemIndex);
SpanC array = arrayAndchangedIndices.array;
return getArrayElementPtr(array, elemIndex);
}
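// Illustrative usage sketch (tokens and types below are hypothetical):
// getOrCreateAttributeWrC adds the attribute to the path if necessary,
// marks that element dirty for change tracking, and returns a writable
// CPU span over the single element:
//
//   SpanC elem = ptam.getOrCreateAttributeWrC(cube, sizeToken, doubleTypeC);
//   *reinterpret_cast<double*>(elem.ptr) = 2.0;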
template <typename T>
void PathToAttributesMap::addAttribute(
const PathC& path, const TokenC& attrName, TypeC type, const T& value)
{
APILOGGER("addAttribute", apiLogEnabled, path, attrName);
// TODO: check that type is compatible
return addAttributeC(path, attrName, type, &value);
}
template <typename T>
void PathToAttributesMap::addSubAttribute(
const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const T& value)
{
APILOGGER("addSubAttribute", apiLogEnabled, path, attrName);
// TODO: check that type is compatible
return addAttributeC(path, attrName, suffix, type, &value);
}
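// Illustrative usage sketch: the typed overloads above forward to
// addAttributeC, so the caller is responsible for passing a TypeC that
// actually describes T (names below are hypothetical):
//
//   double initialSize = 1.0;
//   ptam.addAttribute(cube, sizeToken, doubleTypeC, initialSize);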
// Return a new bucket with all sub-attributes of a single attribute removed
inline Bucket removeAllSubAttributesFromBucket(const Bucket& bucket, const TokenC& attrName)
{
Bucket newBucket;
// TODO: implement set::delete
for (auto nameAndType : bucket)
{
// Don't compare suffix and type to delete all suffix variants of name
if (nameAndType.name != attrName)
{
newBucket.insert(nameAndType);
}
}
return newBucket;
}
// Return a new bucket with all sub-attributes of all named attributes removed
inline Bucket removeAllSubAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames)
{
Bucket newBucket;
for (auto nameAndType : bucket)
{
if (std::find_if(attrNames.begin(), attrNames.end(),
[&](auto attrName) { return nameAndType.name == attrName; }) == attrNames.end())
{
newBucket.insert(nameAndType);
}
}
return newBucket;
}
inline Bucket removeSubAttributeFromBucket(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
Bucket newBucket;
// TODO: implement set::delete
for (auto nameAndType : bucket)
{
        // Compare name and suffix, but not type, so that only this specific sub-attribute is removed
if (!(nameAndType.name == attrName && nameAndType.suffix == suffix))
{
newBucket.insert(nameAndType);
}
}
return newBucket;
}
// Remove an attribute and all its subattributes (suffixes).
inline void PathToAttributesMap::removeAttribute(const PathC& path, const TokenC& attrName)
{
APILOGGER("removeAttribute", apiLogEnabled, path, attrName);
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
if (srcBucketId == kInvalidBucketId)
return;
// srcBucketId != kInvalidBucketId guarantees find will succeed
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
const Bucket& srcTypes = bucketImplPtr->GetBucket();
const Bucket destBucket = removeAllSubAttributesFromBucket(srcTypes, attrName);
const BucketId destBucketId = addBucket(destBucket);
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
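// Illustrative usage sketch (tokens are hypothetical): this removes the
// "size" attribute together with all of its suffix variants, e.g. any
// connection sub-attributes, by retyping the element into a smaller bucket:
//
//   ptam.removeAttribute(cube, sizeToken);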
inline void PathToAttributesMap::removeAttributesFromPath(const PathC& path, const std::vector<TokenC>& attrNames)
{
BucketId srcBucketId;
ArrayIndex srcElemIndex;
std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
if (srcBucketId == kInvalidBucketId)
return;
// srcBucketId != kInvalidBucketId guarantees find will succeed
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
CARB_ASSERT(bucketImplPtr);
const Bucket& bucket = bucketImplPtr->GetBucket();
const Bucket destBucket = removeAllSubAttributesFromBucket(bucket, attrNames);
const BucketId destBucketId = addBucket(destBucket);
if (srcBucketId != destBucketId)
{
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
}
inline void PathToAttributesMap::removeAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames)
{
// first we need to find the actual bucketImpl that we will be
// deleting attributes from
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
return;
BucketId bucketId = iter->second;
BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return;
// Buckets are found based on a set of the attributes, so we build
// the new set based on the attribute names
Bucket newBucket = removeAllSubAttributesFromBucket(bucket, attrNames);
std::pair<BucketId, BucketImpl&> newBucketIdAndImpl = findOrCreateBucket(newBucket);
BucketImpl& newBucketImpl = newBucketIdAndImpl.second;
const size_t origSize = newBucketImpl.elemToPath.size();
if (origSize == 0)
{
        // If the destination bucket is brand new, we prefer to copy the source
        // bucket wholesale and then delete the arrays that are no longer needed.
newBucketImpl = *bucketImplPtr;
newBucketImpl.SetBucket(std::move(newBucket));
// loop finding and deleting attribute arrays
for (auto attrName : attrNames)
{
newBucketImpl.scalarAttributeArrays.forEach([this, &attrName, &newBucketImpl](const AttrName& name, ScalarAttributeArray& array) {
if (name.name == attrName)
{
newBucketImpl.scalarAttributeArrays.freeEntry(name);
}
});
newBucketImpl.arrayAttributeArrays.forEach([this, &attrName, &newBucketImpl](const AttrName& name, ArrayAttributeArray& array) {
if (name.name == attrName)
{
newBucketImpl.arrayAttributeArrays.freeEntry(name);
}
});
}
}
else
{
        // TODO: there should be a faster way to do this, but there is more to discuss here later
for (const auto path : bucketImplPtr->elemToPath)
{
moveElementBetweenBuckets(
asInt(path), newBucketIdAndImpl.first, bucketId, newBucket);
}
}
//
// need to update the pathToBucketElem for all the items that just "moved" buckets
// TODO: make this work with moving buckets
for (size_t i = origSize; i < newBucketImpl.elemToPath.size(); ++i)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
pathToBucketElem.allocateEntry(asInt(newBucketImpl.elemToPath[i]), &bucketAndElemIndex);
*bucketAndElemIndex = std::make_pair(newBucketIdAndImpl.first, i);
}
buckets.erase(bucketId);
attrNameSetToBucketId.erase(bucket);
}
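// Illustrative sketch of the bulk path above (bucket contents and tokens are
// hypothetical): removing attributes from a whole bucket retypes every
// element in it at once, which avoids the per-path lookup cost of calling
// removeAttributesFromPath in a loop:
//
//   Bucket bucket = ...; // the current attribute set of the bucket
//   std::vector<TokenC> doomed = { sizeToken, visibleToken };
//   ptam.removeAttributesFromBucket(bucket, doomed);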
// Remove a particular (name,suffix) pair, for example the connection of an attribute
inline void PathToAttributesMap::removeSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
APILOGGER("removeSubAttribute", apiLogEnabled, path, attrName);
    BucketId srcBucketId;
    ArrayIndex srcElemIndex;
    std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    if (srcBucketId == kInvalidBucketId)
    {
        CARB_LOG_ERROR_ONCE("removeSubAttribute called on non-existent path %s \n", Path(path).getText());
        return;
    }
    // srcBucketId != kInvalidBucketId guarantees find will succeed
    std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
    VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
    BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
    CARB_ASSERT(bucketImplPtr);
    const Bucket& srcTypes = bucketImplPtr->GetBucket();
const Bucket destBucket = removeSubAttributeFromBucket(srcTypes, attrName, suffix);
const BucketId destBucketId = addBucket(destBucket);
moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}
// Removes an attribute (and all its subattributes) for all paths in a bucket
inline void PathToAttributesMap::removeAttributeC(const Bucket& bucket,
const TokenC& attrName,
TypeC type)
{
APILOGGER("removeAttributeC", apiLogEnabled, attrName);
// first we need to find the actual bucketImpl that we will be
// deleting attributes from
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
return;
BucketId bucketId = iter->second;
BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return;
// Buckets are found based on a set of the attributes, so we build
// the new set based on the attribute names
Bucket newBucket = removeAllSubAttributesFromBucket(bucket, attrName);
BucketId newBucketId = findBucketId(newBucket);
if (newBucketId == kInvalidBucketId)
{
        // If the target attribute set does not exist yet, we can retype this bucket in
        // place and simply delete the arrays that are no longer needed. Nothing else
        // needs updating, as all the prims stay where they are.
bucketImplPtr->SetBucket(std::move(newBucket));
if (getTypeInfo(type).isArray)
{
bucketImplPtr->arrayAttributeArrays.forEach([this, &attrName, bucketImplPtr](const AttrName& name, ArrayAttributeArray& array) {
if (name.name == attrName)
{
bucketImplPtr->arrayAttributeArrays.freeEntry(name);
}
});
}
else
{
bucketImplPtr->scalarAttributeArrays.forEach([this, &attrName, bucketImplPtr](const AttrName& name, ScalarAttributeArray& array) {
if (name.name == attrName)
{
bucketImplPtr->scalarAttributeArrays.freeEntry(name);
}
});
}
attrNameSetToBucketId[newBucket] = bucketId;
attrNameSetToBucketId.erase(bucket);
}
else
{
std::pair<BucketId, BucketImpl&> newBucketIdAndImpl = findOrCreateBucket(newBucket);
BucketImpl& newBucketImpl = newBucketIdAndImpl.second;
const size_t origSize = newBucketImpl.elemToPath.size();
        // TODO: there should be a faster way to do this, but there is more to discuss here later.
        // Currently this pulls from the front, which ensures that elements keep the same
        // "order" when moving buckets -> but it also means moving entries around in
        // elemToPath. Doing this in "bulk" would make all of this better.
        while (!bucketImplPtr->elemToPath.empty())
{
moveElementBetweenBuckets(
asInt(bucketImplPtr->elemToPath.front()), newBucketIdAndImpl.first, bucketId, newBucket);
}
//
// need to update the pathToBucketElem for all the items that just "moved" buckets
// TODO: make this work with moving buckets
for (size_t i = origSize; i < newBucketImpl.elemToPath.size(); ++i)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
pathToBucketElem.allocateEntry(asInt(newBucketImpl.elemToPath[i]), &bucketAndElemIndex);
*bucketAndElemIndex = std::make_pair(newBucketIdAndImpl.first, i);
}
}
}
inline void PathToAttributesMap::removePath(const PathC& path)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
if (!pathToBucketElem.find(path, &bucketAndElemIndex))
{
CARB_LOG_ERROR_ONCE("removePath called on non-existent path %s \n", Path(path).getText());
return;
}
const BucketId &bucketId = bucketAndElemIndex->first;
const size_t &elemIndex = bucketAndElemIndex->second;
const bool destroyDataPointedTo = true;
destroyElement(bucketId, elemIndex, destroyDataPointedTo);
pathToBucketElem.freeEntry(path);
}
inline size_t PathToAttributesMap::count(const PathC& path) const
{
const std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
if (pathToBucketElem.find(path, &bucketAndElemIndex))
{
return 1;
}
else
{
return 0;
}
}
struct ViewIterator
{
size_t bucketIndex = 0;
size_t elementIndex = 0;
std::vector<size_t>::const_iterator bucketElemCount;
ViewIterator& operator++()
{
elementIndex++;
if (elementIndex == *bucketElemCount)
{
bucketIndex++;
bucketElemCount++;
elementIndex = 0;
}
return *this;
}
bool operator!=(const ViewIterator& rhs) const
{
return bucketIndex != rhs.bucketIndex || elementIndex != rhs.elementIndex;
}
ViewIterator& operator*()
{
return *this;
}
};
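// Illustrative sketch of how ViewIterator advances (element counts below are
// hypothetical): it walks (bucketIndex, elementIndex) pairs and rolls over to
// the next bucket once the current bucket's element count is exhausted:
//
//   std::vector<size_t> counts = { 2, 3 }; // two buckets of 2 and 3 elements
//   ViewIterator it{ 0, 0, counts.begin() };
//   ViewIterator end{ counts.size(), 0, counts.end() };
//   while (it != end)
//   {
//       // visit (it.bucketIndex, it.elementIndex)
//       ++it;
//   }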
// Array resize that does not preserve previous data
inline void PathToAttributesMap::destructiveResizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx)
{
uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuPointerArray.cpuData());
uint8_t*& gpuData = elemToArrayGpuData[elem];
    // Reallocate if capacity differs from the desired count or nothing has been allocated yet
if (capacity != desiredElemCount || gpuData == nullptr)
{
size_t byteCount = desiredElemCount * elemByteCount;
if (gpuData)
{
platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuData);
}
gpuData = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemByteCount));
// We've written to gpuData on the CPU so we have to invalidate any GPU mirror of it
gpuPointerArray.gpuValid = false;
gpuPointerArray.cpuValid = true;
capacity = desiredElemCount;
}
}
// Array resize that preserves previous data
inline void PathToAttributesMap::resizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
size_t elem,
size_t& capacity,
size_t desiredElemCount,
size_t elemByteCount,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx)
{
// TODO: reduce number of reallocations by allocating capacity larger than size
// and not always reallocating when desiredElemCount<capacity
// if gpuCapacity is 0 that means the array was recently copied and needs to be reallocated
if(computeAPI && (capacity != desiredElemCount || gpuPointerArray.gpuCapacity == 0))
{
size_t oldByteCount = capacity * elemByteCount;
size_t newByteCount = desiredElemCount * elemByteCount;
uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuPointerArray.cpuData());
uint8_t*& gpuData = elemToArrayGpuData[elem];
uint8_t* newGpuData = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, newByteCount, elemByteCount));
if (gpuData)
{
using omni::gpucompute::MemcpyKind;
size_t copyByteCount = std::min(oldByteCount, newByteCount);
computeAPI->memcpy(*computeCtx, newGpuData, gpuData, copyByteCount, MemcpyKind::deviceToDevice);
// Note that this free has to be async even though the previous
// memcpy is sync. The reason is that deviceToDevice memcpys
// are always async, even if you call the sync version of cudaMemcpy.
// So if you do "sync" memcpy and then sync free, the free can
// execute on the CPU before the copy executes on the GPU.
// See https://nvidia-omniverse.atlassian.net/browse/OM-46051
computeAPI->freeAsync(*computeCtx, gpuData);
}
gpuData = newGpuData;
// We've written to gpuData on the CPU so we have to invalidate any GPU mirror of it
gpuPointerArray.gpuValid = false;
gpuPointerArray.cpuValid = true;
capacity = desiredElemCount;
}
}
// This function is called when we are just about to do a transfer to GPU, to
// make sure that GPU array is large enough.
// It is called only when !gpuValid, so we don't have to preserve any existing
// GPU array.
//
// Algorithm:
// If capacity is sufficient, do nothing
// If not, free any existing allocation, then allocate
inline void PathToAttributesMap::allocGpuMemIfNecessary(PathToAttributesMap::MirroredArray& array,
size_t byteCount,
size_t elemSize,
omni::gpucompute::GpuCompute* computeAPI,
omni::gpucompute::Context* computeCtx)
{
bool capacitySufficient = (byteCount <= array.gpuCapacity);
if (!capacitySufficient)
{
if (array.gpuArray)
{
computeAPI->freeAsync(*computeCtx, array.gpuArray);
}
array.gpuArray = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemSize));
array.gpuCapacity = byteCount;
}
}
inline PrimBucketListImpl PathToAttributesMap::getChanges(ListenerId listenerId)
{
PrimBucketListImpl changesOut;
// For now, iterate over all buckets
// We'll probably want the user to specify a subset of buckets for change logging
changesOut.buckets.reserve(buckets.end());
changesOut.changes.reserve(buckets.end());
BucketId id{ 0 };
for (unsigned int i = 0; i < buckets.end(); ++i, ++id)
{
BucketImpl* bucketPtr = buckets.find(id);
if (!bucketPtr)
continue;
BucketImpl& bucketImpl = *bucketPtr;
BucketId bucketId = id;
Changes* changesIn;
if (!bucketImpl.listenerIdToChanges.find(listenerId, &changesIn))
{
continue;
}
size_t changedAttrCount = changesIn->changedAttributes.size();
size_t primCount = bucketImpl.elemToPath.size();
bool attributesChanged = (changedAttrCount != 0 && primCount != 0);
bool primsAdded = (changesIn->getNewPrimCount() != 0);
if (attributesChanged || primsAdded)
{
changesOut.buckets.v.push_back(bucketId);
changesOut.changes.push_back(BucketChangesImpl());
BucketChangesImpl& bucketChanges = changesOut.changes.back();
// Write changed attributes
bucketChanges.changedAttributes = changesIn->changedAttributes;
bucketChanges.changedIndices.resize(changedAttrCount);
for (size_t j = 0; j != changedAttrCount; j++)
{
bucketChanges.changedIndices[j] = { changesIn->changedIndices[j].allIndicesChanged,
{ changesIn->changedIndices[j].changedIndices.data(),
changesIn->changedIndices[j].changedIndices.size() } };
}
bucketChanges.pathArray = { reinterpret_cast<const Path*>(bucketImpl.elemToPath.data()),
bucketImpl.elemToPath.size() };
// Write added prims
bucketChanges.addedIndices = { changesIn->addedIndices.data(), changesIn->addedIndices.size() };
}
}
return changesOut;
}
inline void PathToAttributesMap::popChanges(ListenerId listenerId)
{
BucketId id{ 0 };
for (unsigned int i = 0; i < buckets.end(); ++i, ++id)
{
BucketImpl* bucketPtr = buckets.find(id);
if (bucketPtr)
{
// Create listenerId if it doesn't exist
Changes* changes;
if (bucketPtr->listenerIdToChanges.allocateEntry(listenerId, &changes))
{
new (changes) Changes;
}
changes->changedAttributes.clear();
changes->changedIndices.clear();
changes->addedIndices.clear();
}
}
}
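// Illustrative change-tracking loop (listener registration is assumed to have
// happened elsewhere): a consumer typically drains its change log by reading
// it with getChanges() and then clearing it with popChanges():
//
//   PrimBucketListImpl changed = ptam.getChanges(listener);
//   for (size_t b = 0; b != changed.buckets.v.size(); ++b)
//   {
//       const BucketChangesImpl& bucketChanges = changed.changes[b];
//       // inspect bucketChanges.changedAttributes, .changedIndices, .addedIndices
//   }
//   ptam.popChanges(listener); // reset the per-bucket logs for this listener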
inline PathToAttributesMap::MirroredArray::MirroredArray(Platform& platform_, const TypeC &type, const Typeinfo& typeinfo) noexcept
: cpuArray()
, platform(platform_)
, type(type)
, typeinfo(typeinfo)
, gpuArray(nullptr)
, gpuCapacity(0)
, d3dArrays()
, count(0)
, usdValid(true)
, cpuValid(false)
, gpuValid(false)
, gpuAllocedWithCuda(false)
, attributeMutex()
{
}
inline PathToAttributesMap::MirroredArray::~MirroredArray()
{
// clean up any non-array gpu data
if (gpuArray)
{
if (gpuAllocedWithCuda)
{
platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuArray);
}
else
{
// @TODO Fix crash during D3dVk free! N.B. that this backend is incomplete and not in active use
// gpuD3dVk->freeAsync(*gpuD3dVkCtx, gpuArray);
}
gpuArray = nullptr;
}
gpuValid = false;
}
inline PathToAttributesMap::MirroredArray& PathToAttributesMap::MirroredArray::operator=(const MirroredArray& other) noexcept
{
if (!other.isArrayOfArray())
{
cpuArray = other.cpuArray;
}
else
{
// Here we set all pointers in dest array to nullptr
// The allocation and data copy happens in PathToAttributesMap's operator=
cpuArray.resize(other.size());
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(cpuData());
for (size_t elemIndex = 0; elemIndex != other.count; elemIndex++)
{
destPtrs[elemIndex] = nullptr;
}
}
// platform = other.platform; // intentionally not copy-assigning platform
type = other.type;
typeinfo = other.typeinfo;
usdValid = other.usdValid;
cpuValid = other.cpuValid;
count = other.count;
// GPU data needs to be copied explicitly using the gpu compute API
gpuArray = nullptr;
gpuCapacity = 0;
gpuValid = false;
gpuAllocedWithCuda = false;
if (other.gpuValid)
{
// Also need to empty the cpuArray as it is a cpu pointer to now-invalid GPU data
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(cpuData());
for (size_t elemIndex = 0; elemIndex != other.count; elemIndex++)
{
destPtrs[elemIndex] = nullptr;
}
}
return *this;
}
inline PathToAttributesMap::MirroredArray::MirroredArray(MirroredArray&& other) noexcept
: cpuArray(std::move(other.cpuArray)),
platform(other.platform),
type(other.type),
typeinfo(other.typeinfo),
gpuArray(other.gpuArray),
gpuCapacity(other.gpuCapacity),
d3dArrays(std::move(other.d3dArrays)),
count(other.count),
usdValid(other.usdValid),
cpuValid(other.cpuValid),
gpuValid(other.gpuValid),
gpuAllocedWithCuda(other.gpuAllocedWithCuda),
attributeMutex() // intentionally not move constructing the mutex
{
other.gpuArray = nullptr;
}
inline PathToAttributesMap::MirroredArray& PathToAttributesMap::MirroredArray::operator=(MirroredArray&& other) noexcept
{
MirroredArray tmp(std::move(other));
swap(*this, tmp);
return *this;
}
inline void swap(PathToAttributesMap::MirroredArray& a, PathToAttributesMap::MirroredArray& b) noexcept
{
using std::swap;
swap(a.cpuArray, b.cpuArray);
swap(a.type, b.type);
swap(a.typeinfo, b.typeinfo);
swap(a.gpuArray, b.gpuArray);
swap(a.gpuCapacity, b.gpuCapacity);
swap(a.d3dArrays, b.d3dArrays);
swap(a.count, b.count);
swap(a.usdValid, b.usdValid);
swap(a.cpuValid, b.cpuValid);
swap(a.gpuValid, b.gpuValid);
swap(a.gpuAllocedWithCuda, b.gpuAllocedWithCuda);
// swap(a.attributeMutex, b.attributeMutex); // intentionally NOT swapping attribute mutex because it is not move-constructable
}
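// Note: the move-assignment above is implemented with the move-construct-and-swap
// idiom, so the moved-from array ends up owning this array's previous resources
// and releases them in its destructor. attributeMutex is deliberately excluded
// from both the move constructor and swap because the mutex is not movable.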
inline PathToAttributesMap::ArrayAttributeArray::ArrayAttributeArray(Platform& platform_, const TypeC& type, const Typeinfo& typeinfo) noexcept
: values(platform_, type, typeinfo)
, elemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{sizeof(PTAM_SIZE_TYPE), false, 0})
, cpuElemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 })
, gpuElemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 })
, gpuPtrs(platform_, PTAM_POINTER_TYPEC, Typeinfo{ sizeof(PTAM_POINTER_TYPE), false, 0 })
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
}
inline PathToAttributesMap::ArrayAttributeArray::~ArrayAttributeArray()
{
Platform& platform = values.platform;
CARB_ASSERT(&platform == &elemCounts.platform);
CARB_ASSERT(&platform == &cpuElemCounts.platform);
CARB_ASSERT(&platform == &gpuElemCounts.platform);
CARB_ASSERT(&platform == &gpuPtrs.platform);
if (values.count)
{
uint8_t** elemToCpuPtr = reinterpret_cast<uint8_t**>(values.cpuData());
for (size_t elemIndex = 0; elemIndex != values.count; elemIndex++)
{
// If a CPU array has been allocated, free it
uint8_t*& cpuPtrToDelete = elemToCpuPtr[elemIndex];
if (cpuPtrToDelete)
{
if (!USE_PINNED_MEMORY || !platform.gpuCuda)
{
free(cpuPtrToDelete);
}
else if (platform.gpuCuda)
{
platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuPtrToDelete);
}
cpuPtrToDelete = nullptr;
values.cpuValid = false;
}
}
}
if (gpuPtrs.count)
{
// CPU array of GPU pointers
uint8_t** elemToGpuPtr = reinterpret_cast<uint8_t**>(gpuPtrs.cpuData());
for (size_t elemIndex = 0; elemIndex != gpuPtrs.count; elemIndex++)
{
// If a GPU array has been allocated, free it
uint8_t*& gpuPtrToDelete = elemToGpuPtr[elemIndex];
if (gpuPtrToDelete)
{
CARB_ASSERT(platform.gpuCuda);
CARB_ASSERT(platform.gpuCudaCtx);
platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuPtrToDelete);
gpuPtrToDelete = nullptr;
}
}
}
}
inline PathToAttributesMap::ArrayAttributeArray& PathToAttributesMap::ArrayAttributeArray::operator=(const ArrayAttributeArray& other) noexcept
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
values = other.values;
elemCounts = other.elemCounts;
cpuElemCounts = other.cpuElemCounts;
gpuElemCounts = other.gpuElemCounts;
gpuPtrs = other.gpuPtrs;
return *this;
}
inline PathToAttributesMap::ArrayAttributeArray::ArrayAttributeArray(ArrayAttributeArray&& other) noexcept
: values(std::move(other.values))
, elemCounts(std::move(other.elemCounts))
, cpuElemCounts(std::move(other.cpuElemCounts))
, gpuElemCounts(std::move(other.gpuElemCounts))
, gpuPtrs(std::move(other.gpuPtrs))
{
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
other.values.count = 0;
other.elemCounts.count = 0;
other.cpuElemCounts.count = 0;
other.gpuElemCounts.count = 0;
other.gpuPtrs.count = 0;
}
inline PathToAttributesMap::ArrayAttributeArray& PathToAttributesMap::ArrayAttributeArray::operator=(ArrayAttributeArray&& other) noexcept
{
ArrayAttributeArray tmp(std::move(other));
swap(*this, tmp);
return *this;
}
inline void swap(PathToAttributesMap::ArrayAttributeArray& a, PathToAttributesMap::ArrayAttributeArray& b) noexcept
{
using std::swap;
swap(a.values, b.values);
swap(a.elemCounts, b.elemCounts);
swap(a.cpuElemCounts, b.cpuElemCounts);
swap(a.gpuElemCounts, b.gpuElemCounts);
swap(a.gpuPtrs, b.gpuPtrs);
}
inline void PathToAttributesMap::printBucket(const Bucket& bucket) const
{
bool multiLine = (1 < bucket.size());
printf("{");
if (multiLine)
printf("\n");
for (const auto& b : bucket)
{
TokenC attrName = b.name;
NameSuffix suffix = b.suffix;
Type type(b.type);
if (multiLine)
printf(" ");
Token attrNameToken(attrName);
std::cout << "TypeC(" << type << ") " << attrNameToken.getText() << suffix;
if (multiLine)
printf("\n");
}
printf("} ");
// Print arrays in bucket
auto iter = attrNameSetToBucketId.find(bucket);
bool found = (iter != attrNameSetToBucketId.end());
if (!found)
return;
BucketId bucketId = iter->second;
auto bucketImplPtr = buckets.find(bucketId);
if (bucketImplPtr)
{
const BucketImpl& bucketImpl = *bucketImplPtr;
bucketImpl.scalarAttributeArrays.forEach([&multiLine](const AttrName& name, const ScalarAttributeArray& array) {
#if ENABLE_USD_DEBUGGING
std::cout << toTfToken(name.name).GetText() << " " << toString(name.suffix) << " ";
#else
std::cout << name.name.token << " ";
#endif
std::cout << array.size() << "bytes ";
if (multiLine)
std::cout << "\n";
});
bucketImpl.arrayAttributeArrays.forEach([&multiLine](const AttrName& name, const ArrayAttributeArray& array) {
#if ENABLE_USD_DEBUGGING
std::cout << toTfToken(name.name).GetText() << " " << toString(name.suffix) << " ";
#else
std::cout << name.name.token << " ";
#endif
std::cout << array.values.size() << "bytes ";
if (multiLine)
std::cout << "\n";
});
}
}
inline void PathToAttributesMap::printBucketName(const Bucket& bucketTypes, BucketId bucketId) const
{
const BucketImpl* bucketImplPtr = buckets.find(bucketId);
if (!bucketImplPtr)
return;
const BucketImpl& bucketImpl = *bucketImplPtr;
std::cout << "Id: " << size_t(bucketId) << " ";
size_t bucketPrimCount = bucketImpl.elemToPath.size();
std::cout << "PrimCount: " << bucketPrimCount << " ";
// Find USD prim type
for (auto attrNameAndType : bucketTypes)
{
Type type(attrNameAndType.type);
if (type.role == AttributeRole::ePrimTypeName)
{
Token nameToken(attrNameAndType.name);
std::cout << "PrimType: " << nameToken.getText() << " ";
}
}
std::cout << "AttributeNames: ";
for (auto attrNameAndType : bucketTypes)
{
Token nameToken(attrNameAndType.name);
std::cout << nameToken.getText() << toString(attrNameAndType.suffix) << " ";
}
std::cout << "\n";
}
inline void PathToAttributesMap::printBucketNames() const
{
std::cout << "Buckets:\n";
for (auto& bucketIdAndBucket : attrNameSetToBucketId)
{
const Bucket& bucketTypes = bucketIdAndBucket.first;
BucketId bucketId = bucketIdAndBucket.second;
std::cout << " ";
printBucketName(bucketTypes, bucketId);
}
}
inline void PathToAttributesMap::printBucketNamesAndTypes() const
{
std::cout << "Buckets:\n";
for (auto& bucketIdAndBucket : attrNameSetToBucketId)
{
std::cout << " ";
const Bucket& bucketTypes = bucketIdAndBucket.first;
for (AttrNameAndType attrNameAndType : bucketTypes)
{
Type type(attrNameAndType.type);
Token nameToken(attrNameAndType.name);
std::cout << "(" << type << " " << nameToken.getText() << " " << attrNameAndType.suffix << " "
<< "TypeC(" << attrNameAndType.type << ") ";
}
std::cout << "\n";
}
}
inline void PathToAttributesMap::bucketImplCopyScalarAttributeArray(ScalarAttributeArray &dest, const ScalarAttributeArray &src)
{
CARB_ASSERT(dest.type == src.type);
if (src.gpuValid)
{
dest.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, src.size(), dest.typeinfo.size);
platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, dest.gpuArray, src.gpuArray, src.gpuCapacity, omni::gpucompute::MemcpyKind::deviceToDevice);
dest.gpuValid = true;
dest.gpuCapacity = src.gpuCapacity;
dest.gpuAllocedWithCuda = src.gpuAllocedWithCuda;
}
}
inline void PathToAttributesMap::bucketImplCopyArrayAttributeArray(BucketImpl& destBucketImpl, const AttrName& destName, ArrayAttributeArray &dest, const ArrayAttributeArray &src)
{
CARB_ASSERT(dest.values.type == src.values.type);
const Typeinfo &typeInfo = dest.values.typeinfo;
const size_t arrayElemSize = typeInfo.arrayElemSize;
MirroredArray *const destSizeArray = &dest.elemCounts;
MirroredArray *const destCpuCapacityArray = &dest.cpuElemCounts;
const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(dest);
    // TODO: Figure out how to remove this fixup step in a cleaner way
// Need to set capacity to zero, because capacity will
// have been erroneously copied from source in
// MirroredArray copy constructor
ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(*destCpuCapacityArray, destName, destAOA, destBucketImpl, CpuWriteConfig());
setArrayDirty(arrayAndchangedIndices);
SpanC destCapacitySpan = arrayAndchangedIndices.array;
size_t* destCapacities = reinterpret_cast<size_t*>(destCapacitySpan.ptr);
for (size_t elemIndex = 0; elemIndex != destCapacitySpan.elementCount; elemIndex++)
{
destCapacities[elemIndex] = 0;
}
// getArrayWrC to allocate data for arrays
ArrayAndDirtyIndices destSpan = getArraySpanC(dest.values, destName, destAOA, destBucketImpl, CpuWriteConfig());
setArrayDirty(destSpan);
const size_t* elemCounts = reinterpret_cast<const size_t*>(destSizeArray->cpuData());
if (src.values.cpuValid)
{
// TODO: Isn't this redundant with the call to getArraySpanC above with cpu write access?
enableCpuWrite(dest.values, elemCounts, destCpuCapacityArray, nullptr, nullptr);
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destSpan.array.ptr);
uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(src.values.cpuData());
for (size_t elemIndex = 0; elemIndex != destSpan.array.elementCount; elemIndex++)
{
uint8_t* destPtr = destPtrs[elemIndex];
const uint8_t* srcPtr = srcPtrs[elemIndex];
size_t sizeBytes = elemCounts[elemIndex] * arrayElemSize;
memcpy(destPtr, srcPtr, sizeBytes);
}
}
else
{
dest.values.cpuValid = false;
}
if (src.values.gpuValid)
{
MirroredArray *const destGpuElemCountArray = &dest.gpuElemCounts;
MirroredArray *const destGpuPtrArray = &dest.gpuPtrs;
enableGpuWrite(dest.values, elemCounts, destCpuCapacityArray, destGpuElemCountArray, destGpuPtrArray);
const MirroredArray *const srcGpuPtrArray = &src.gpuPtrs;
// Select which API to use
omni::gpucompute::GpuCompute* computeAPI = nullptr;
omni::gpucompute::Context* computeCtx = nullptr;
if (src.values.gpuAllocedWithCuda)
{
computeAPI = platform.gpuCuda;
computeCtx = platform.gpuCudaCtx;
}
if (computeAPI)
{
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destGpuPtrArray->cpuData());
uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(srcGpuPtrArray->cpuData());
for (size_t elemIndex = 0; elemIndex != destSpan.array.elementCount; elemIndex++)
{
uint8_t* destPtr = destPtrs[elemIndex];
const uint8_t* srcPtr = srcPtrs[elemIndex];
size_t sizeBytes = elemCounts[elemIndex] * arrayElemSize;
computeAPI->memcpyAsync(*computeCtx, destPtr, srcPtr, sizeBytes, omni::gpucompute::MemcpyKind::deviceToDevice);
}
destGpuPtrArray->gpuAllocedWithCuda = src.values.gpuAllocedWithCuda;
}
}
}
inline void PathToAttributesMap::bucketImplCopyArrays(BucketImpl& destBucketImpl,
BucketId destBucketId,
const BucketImpl& srcBucketImpl,
BucketId srcBucketId,
const carb::flatcache::set<AttrNameAndType_v2>& attrFilter)
{
destBucketImpl.scalarAttributeArrays.forEach([this, &srcBucketImpl, &attrFilter](const AttrName& destName, ScalarAttributeArray& dest) {
AttrNameAndType_v2 destNameV2(
carb::flatcache::Type(dest.type), destName.name, destName.suffix);
const bool attrIsInFilter = attrFilter.size() == 0 || (attrFilter.find(destNameV2) != attrFilter.end());
if (attrIsInFilter && destName.suffix == NameSuffix::none)
{
const ScalarAttributeArray* src;
VALIDATE_TRUE(srcBucketImpl.scalarAttributeArrays.find(destName, &src));
CARB_ASSERT(src);
bucketImplCopyScalarAttributeArray(dest, *src);
}
});
destBucketImpl.arrayAttributeArrays.forEach([this, &destBucketImpl, &srcBucketImpl, &attrFilter](const AttrName& destName, ArrayAttributeArray& dest) {
AttrNameAndType_v2 destNameV2(
carb::flatcache::Type(dest.values.type), destName.name, destName.suffix);
const bool attrIsInFilter = attrFilter.size() == 0 || (attrFilter.find(destNameV2) != attrFilter.end());
if (attrIsInFilter && destName.suffix == NameSuffix::none)
{
const ArrayAttributeArray* src;
VALIDATE_TRUE(srcBucketImpl.arrayAttributeArrays.find(destName, &src));
CARB_ASSERT(src);
bucketImplCopyArrayAttributeArray(destBucketImpl, destName, dest, *src);
}
});
}
template<typename CallbackT>
void inline PathToAttributesMap::BucketImpl::forEachValueArray(CallbackT callback)
{
scalarAttributeArrays.forEach([&callback](const AttrName& name, ScalarAttributeArray& array) {
callback(name, array);
});
arrayAttributeArrays.forEach([&callback](const AttrName& name, ArrayAttributeArray& array) {
callback(name, array.values);
static_assert(sizeof(PathToAttributesMap::ArrayAttributeArray) == 5 * sizeof(PathToAttributesMap::MirroredArray), "ArrayAttributeArray has unexpected size");
// Intentionally skips these
// callback(name, array.elemCounts);
// callback(name, array.cpuElemCounts);
// callback(name, array.gpuElemCounts);
// callback(name, array.gpuPtrs);
});
}
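// Illustrative sketch: forEachValueArray visits the value array of every
// attribute in the bucket, scalar or array-valued, without exposing the
// bookkeeping arrays of array-of-array attributes. For example, to invalidate
// all GPU mirrors after an out-of-band CPU write (assumed to be safe at the
// call site):
//
//   bucketImpl.forEachValueArray([](const AttrName& name, MirroredArray& values) {
//       values.gpuValid = false;
//       values.cpuValid = true;
//   });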
inline void PathToAttributesMap::Serializer::init(uint8_t *const _buf, uint8_t *const _end)
{
p = buf = _buf;
end = _end;
bytesWritten = 0;
overflowed = false;
}
inline bool PathToAttributesMap::Serializer::writeBytes(const uint8_t *const src, uint64_t size)
{
CARB_ASSERT(src);
bytesWritten += size;
if (p != nullptr && p + size <= end)
{
memcpy(p, src, size);
p += size;
return true;
}
overflowed = true;
return false;
}
inline bool PathToAttributesMap::Serializer::writeString(const char* const s, const size_t len)
{
bool OK = true;
if (!write<size_t>(len))
{
OK = false;
}
if (!writeBytes(reinterpret_cast<const uint8_t*>(s), len))
{
OK = false;
}
return OK;
}
inline bool PathToAttributesMap::Serializer::writeString(const std::string &s)
{
bool OK = true;
if (!write<size_t>(s.length()))
{
OK = false;
}
if (!writeBytes(reinterpret_cast<const uint8_t*>(s.data()), s.length()))
{
OK = false;
}
return OK;
}
template<typename T>
bool PathToAttributesMap::Serializer::write(const T &t)
{
static_assert(std::is_pod<T>::value, "T must be POD");
return writeBytes(reinterpret_cast<const uint8_t*>(&t), sizeof(T));
}
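// Illustrative sketch of a two-pass use of the Serializer (field names are
// hypothetical): a first pass over a null buffer only accumulates
// bytesWritten (each write reports overflow), then the caller allocates and
// replays the writes for real:
//
//   PathToAttributesMap::Serializer out;
//   out.init(nullptr, nullptr); // measuring pass
//   out.write<uint64_t>(version);
//   out.writeString(name);
//   std::vector<uint8_t> buf(out.bytesWritten);
//   out.init(buf.data(), buf.data() + buf.size());
//   out.write<uint64_t>(version);
//   out.writeString(name);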
inline void PathToAttributesMap::Deserializer::init(const uint8_t *const _buf, const uint8_t *const _end)
{
p = buf = _buf;
end = _end;
bytesRead = 0;
overflowed = false;
}
inline bool PathToAttributesMap::Deserializer::readBytes(uint8_t *const dst, uint64_t size)
{
CARB_ASSERT(dst);
bytesRead += size;
if (p + size <= end)
{
memcpy(dst, p, size);
p += size;
return true;
}
overflowed = true;
return false;
}
inline bool PathToAttributesMap::Deserializer::readString(std::string &s)
{
size_t len;
read<size_t>(len);
s.resize(len);
return readBytes(reinterpret_cast<uint8_t*>(&s[0]), len);
}
template<typename T>
bool PathToAttributesMap::Deserializer::read(T &t)
{
static_assert(std::is_pod<T>::value, "T must be POD");
return readBytes(reinterpret_cast<uint8_t*>(&t), sizeof(T));
}
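// Illustrative round-trip sketch for the Deserializer (the buffer is assumed
// to have been produced by the Serializer writes shown above):
//
//   PathToAttributesMap::Deserializer in;
//   in.init(buf.data(), buf.data() + buf.size());
//   uint64_t version = 0;
//   std::string name;
//   if (!in.read<uint64_t>(version) || !in.readString(name))
//   {
//       // buffer was truncated; in.overflowed is set
//   }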
inline void PathToAttributesMap::serializeMirroredArrayMetadata(const AttrName& srcName, MirroredArray &srcValuesArray, Serializer &out)
{
out.writeString(toTfToken(srcName.name).GetString());
out.write<NameSuffix>(srcName.suffix);
    // TfTokens are actually pointers, so we serialize the encoded TypeC instead
out.write<TypeC>(srcValuesArray.type);
out.write<bool>(srcValuesArray.cpuValid);
out.write<bool>(srcValuesArray.usdValid);
out.write<size_t>(srcValuesArray.count);
}
pxr::TfType typeCtoTfType(TypeC typeC);
template<typename ArraysT, typename ArraysMapT>
inline void PathToAttributesMap::deserializeMirroredArrayMetadata(Platform& platform, ArraysMapT& arraysMap, AttrName &destName, Typeinfo *&typeInfo, ArraysT *&destArray, Deserializer &in)
{
std::string nameStr;
in.readString(nameStr);
in.read<NameSuffix>(destName.suffix);
destName.name = asInt(pxr::TfToken(nameStr));
TypeC destType;
{
in.read<TypeC>(destType);
}
// typeToInfo is deserialized before all mirrored arrays, so the type must exist.
VALIDATE_TRUE(typeToInfo.find(destType, &typeInfo));
if (!arraysMap.allocateEntry(destName, &destArray))
{
CARB_LOG_ERROR("Failed to insert dest mirrored array");
return;
}
CARB_ASSERT(destArray);
new (destArray) ArraysT(platform, destType, *typeInfo);
MirroredArray *const destValuesArray = destArray->getValuesArray();
destValuesArray->type = destType;
in.read<bool>(destValuesArray->cpuValid);
in.read<bool>(destValuesArray->usdValid);
in.read<size_t>(destValuesArray->count);
destValuesArray->resize(destValuesArray->count * typeInfo->size);
}
inline uint64_t PathToAttributesMap::serializeScalarAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out)
{
const size_t bytesBegin = out.bytesWritten;
MirroredArray &srcValuesArray = srcScalarAttributeArray;
serializeMirroredArrayMetadata(srcName, srcValuesArray, out);
if (srcValuesArray.cpuValid)
{
const ConstSpanC srcSpan = getArraySpanC(srcValuesArray, srcName, ScalarArrayOfArrayInfo(), srcBucketImpl, CpuReadConfig()).array;
CARB_ASSERT(srcSpan.elementCount == srcValuesArray.count);
const uint8_t* srcPtr = reinterpret_cast<const uint8_t*>(srcSpan.ptr);
const Typeinfo &typeInfo = srcValuesArray.typeinfo;
CARB_ASSERT(!typeInfo.isArray);
out.writeBytes(srcPtr, srcSpan.elementCount * typeInfo.size);
}
return out.bytesWritten - bytesBegin;
}
inline bool PathToAttributesMap::deserializeScalarAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in)
{
AttrName destName;
Typeinfo *typeInfo;
ScalarAttributeArray *destArray;
deserializeMirroredArrayMetadata(destBucketImpl.platform, destBucketImpl.scalarAttributeArrays, destName, typeInfo, destArray, in);
CARB_ASSERT(typeInfo);
CARB_ASSERT(destArray);
CARB_ASSERT(!typeInfo->isArray);
if (destArray->cpuValid)
{
uint8_t* destPtr = reinterpret_cast<uint8_t*>(destArray->cpuData());
in.readBytes(destPtr, destArray->count * typeInfo->size);
}
return true;
}
inline uint64_t PathToAttributesMap::serializeArrayAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ArrayAttributeArray& srcArrayAttributeArray, Serializer &out)
{
const size_t bytesBegin = out.bytesWritten;
MirroredArray &srcValuesArray = srcArrayAttributeArray.values;
serializeMirroredArrayMetadata(srcName, srcValuesArray, out);
// write scalar metadata
auto writeScalarArrayOfArrayMetadata = [this](BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out) {
// similar to serializeScalarAttributeArray, but we can skip some metadata because it should be inferrable
out.write<bool>(srcScalarAttributeArray.cpuValid);
out.write<bool>(srcScalarAttributeArray.usdValid);
out.write<size_t>(srcScalarAttributeArray.count);
if (srcScalarAttributeArray.cpuValid)
{
const ConstSpanC srcSpan = getArraySpanC(srcScalarAttributeArray, srcName, ScalarArrayOfArrayInfo(), srcBucketImpl, CpuReadConfig()).array;
CARB_ASSERT(srcSpan.elementCount == srcScalarAttributeArray.count);
const uint8_t* srcPtr = reinterpret_cast<const uint8_t*>(srcSpan.ptr);
const Typeinfo &typeInfo = srcScalarAttributeArray.typeinfo;
CARB_ASSERT(!typeInfo.isArray);
out.writeBytes(srcPtr, srcSpan.elementCount * typeInfo.size);
}
};
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.elemCounts, out);
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.cpuElemCounts, out);
// TODO: Can we omit these?
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.gpuElemCounts, out);
writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.gpuPtrs, out);
// write array-of-array values
if (srcValuesArray.cpuValid)
{
const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(srcArrayAttributeArray);
const ConstSpanC srcSpan = getArraySpanC(srcValuesArray, srcName, destAOA, srcBucketImpl, CpuReadConfig()).array;
CARB_ASSERT(srcSpan.elementCount == srcValuesArray.count);
        // TODO: Should this be cpuElemCounts instead of elemCounts? The requested capacity may not have been applied yet.
const size_t* elemCounts = reinterpret_cast<const size_t*>(srcArrayAttributeArray.elemCounts.cpuData());
const Typeinfo &typeInfo = srcValuesArray.typeinfo;
CARB_ASSERT(typeInfo.isArray);
uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(srcSpan.ptr);
for (size_t elemIndex = 0; elemIndex != srcSpan.elementCount; elemIndex++)
{
const uint8_t* srcPtr = srcPtrs[elemIndex];
const size_t elemCount = elemCounts[elemIndex];
out.writeBytes(srcPtr, elemCount * typeInfo.arrayElemSize);
}
}
return out.bytesWritten - bytesBegin;
}
inline bool PathToAttributesMap::deserializeArrayAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in)
{
AttrName destName;
Typeinfo *typeInfo;
ArrayAttributeArray *destArray;
deserializeMirroredArrayMetadata(destBucketImpl.platform, destBucketImpl.arrayAttributeArrays, destName, typeInfo, destArray, in);
CARB_ASSERT(typeInfo);
CARB_ASSERT(destArray);
CARB_ASSERT(typeInfo->isArray);
// write scalar metadata
auto readScalarArrayOfArrayMetadata = [this](BucketImpl& destBucketImpl, const BucketId& destBucketId, const AttrName& destName, ScalarAttributeArray& destScalarAttributeArray, Deserializer &in) {
// similar to deserializeScalarAttributeArray, but we can skip some metadata because it should be inferrable
in.read<bool>(destScalarAttributeArray.cpuValid);
in.read<bool>(destScalarAttributeArray.usdValid);
in.read<size_t>(destScalarAttributeArray.count);
const Typeinfo &typeInfo = destScalarAttributeArray.typeinfo;
destScalarAttributeArray.resize(typeInfo.size * destScalarAttributeArray.count);
if (destScalarAttributeArray.cpuValid)
{
CARB_ASSERT(destScalarAttributeArray.size() == (getTypeInfo(destScalarAttributeArray.type).size * destScalarAttributeArray.count));
uint8_t *const destPtr = reinterpret_cast<uint8_t*>(destScalarAttributeArray.cpuData());
CARB_ASSERT(!typeInfo.isArray);
in.readBytes(destPtr, destScalarAttributeArray.count * typeInfo.size);
}
};
static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->elemCounts, in);
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->cpuElemCounts, in);
// TODO: Can we omit these?
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->gpuElemCounts, in);
readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->gpuPtrs, in);
// read array-of-array values
MirroredArray& destValuesArray = destArray->values;
if (destValuesArray.cpuValid)
{
// Need to set capacity to zero, because capacity will
// have been erroneously copied from source
const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(*destArray);
const SpanC destCapacitySpan = getArraySpanC(destArray->cpuElemCounts, destName, destAOA, destBucketImpl, CpuReadWriteConfig()).array;
size_t *const destCapacities = reinterpret_cast<size_t*>(destCapacitySpan.ptr);
for (size_t elemIndex = 0; elemIndex != destCapacitySpan.elementCount; elemIndex++)
{
destCapacities[elemIndex] = 0;
}
const size_t* elemCounts = reinterpret_cast<const size_t*>(destArray->elemCounts.cpuData());
// getArrayWrC to allocate data for arrays
const SpanC destSpan = getArraySpanC(destValuesArray, destName, destAOA, destBucketImpl, CpuReadWriteConfig()).array;
uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destSpan.ptr);
for (size_t elemIndex = 0; elemIndex != destValuesArray.count; elemIndex++)
{
uint8_t* destPtr = destPtrs[elemIndex];
size_t elemCount = elemCounts[elemIndex];
in.readBytes(destPtr, elemCount * typeInfo->arrayElemSize);
}
destValuesArray.cpuValid = true;
}
return true;
}
inline BucketImpl& PathToAttributesMap::addAttributeInternal(BucketImpl& prevBucketImpl, const Bucket& prevBucket, const TokenC& attrName, const TypeC type, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount)
{
APILOGGER("addAttributeInternal", apiLogEnabled, attrName);
// newBucket := oldBucket Union { attrName }
// findOrCreate (bucketId, bucketImpl) for newBucket, which updates
// attrNameSetToBucketId and buckets
Bucket nextBucket = prevBucket;
nextBucket.insert({ carb::flatcache::Type(type), attrName, NameSuffix::none });
// Early out if attribute already in bucket
const bool attributeAlreadyInBucket = (nextBucket.size() == prevBucket.size());
if (attributeAlreadyInBucket)
return prevBucketImpl;
const std::pair<BucketId, BucketImpl&> nextBucketIdAndImpl = findOrCreateBucket(nextBucket);
const BucketId nextBucketId = nextBucketIdAndImpl.first;
BucketImpl& nextBucketImpl = nextBucketIdAndImpl.second;
const size_t nextBucketOriginalSize = nextBucketImpl.elemToPath.size();
if (nextBucketOriginalSize == 0)
{
// Move arrays etc. from original bucket
nextBucketImpl = std::move(prevBucketImpl);
nextBucketImpl.SetBucket(std::move(nextBucket));
// Below are codified assumptions about the side-effects of attempted move-assigning of a BucketImpl.
// We assume that move-assigning prevBucketImpl like above will clear it as well. This is important because
// prevBucketImpl may still reside as a valid bucket in the PathToAttributesMap::buckets map.
//
// These asserts live outside of the move-assignment operator definition because, technically, the compiler is
// allowed to elect to use a copy-assignment if it needs to.
//
// TODO: Would this be better expressed as an explicit "clear" of prevBucketImpl? Why wasn't that the original
// behavior?
CARB_ASSERT(prevBucketImpl.scalarAttributeArrays.empty());
CARB_ASSERT(prevBucketImpl.arrayAttributeArrays.empty());
CARB_ASSERT(prevBucketImpl.elemToPath.empty());
CARB_ASSERT(prevBucketImpl.listenerIdToChanges.empty());
}
else
{
        // TODO: there should be a faster way to do this, but there is more to discuss here later
auto prevBucketMapIter = attrNameSetToBucketId.find(prevBucket);
const BucketId prevBucketId = (prevBucketMapIter != attrNameSetToBucketId.end()) ? prevBucketMapIter->second : kInvalidBucketId;
for (const auto path : prevBucketImpl.elemToPath)
{
moveElementBetweenBuckets(asInt(path), nextBucketId, prevBucketId, nextBucket);
}
}
const size_t nextBucketNewSize = nextBucketImpl.elemToPath.size();
CARB_ASSERT(nextBucketNewSize >= nextBucketOriginalSize);
CARB_ASSERT(getTypeInfo(type).size == typeinfo.size);
CARB_ASSERT(getTypeInfo(type).isArray == typeinfo.isArray);
CARB_ASSERT(getTypeInfo(type).arrayElemSize == typeinfo.arrayElemSize);
// Add an array for the new attribute
const AttrName name{ attrName, NameSuffix::none };
ArrayAttributeArray *arrayAttributeArray;
MirroredArray* valuesArray;
if (typeinfo.isArray)
{
const bool inserted = nextBucketImpl.arrayAttributeArrays.allocateEntry(std::move(name), &arrayAttributeArray);
valuesArray = &arrayAttributeArray->values;
if (inserted)
{
new (arrayAttributeArray) ArrayAttributeArray(nextBucketImpl.platform, type, typeinfo);
while (valuesArray->count < nextBucketNewSize)
{
allocElement(*arrayAttributeArray);
}
}
}
else
{
const bool inserted = nextBucketImpl.scalarAttributeArrays.allocateEntry(std::move(name), &valuesArray);
arrayAttributeArray = nullptr;
if (inserted)
{
new (valuesArray) ScalarAttributeArray(nextBucketImpl.platform, type, typeinfo);
while (valuesArray->count < nextBucketNewSize)
{
allocElement(*valuesArray);
}
}
}
CARB_ASSERT(valuesArray);
CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray);
CARB_ASSERT(!typeinfo.isArray || getTypeInfo(valuesArray->type).isArray);
#if CARB_ASSERT_ENABLED
const size_t elemCount = getElementCount(nextBucketImpl.GetBucket());
CARB_ASSERT(valuesArray->count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->cpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->elemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuPtrs.count == elemCount);
#endif // #if CARB_ASSERT_ENABLED
// fixup elem/path maps
for (pxr::SdfPath& path : nextBucketImpl.elemToPath)
{
std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
if (pathToBucketElem.find(asInt(path), &bucketAndElemIndex))
{
bucketAndElemIndex->first = nextBucketId;
}
}
// If default value specified, copy it to every element
if (value)
{
fillAttributeInternal(nextBucketImpl, name, nextBucketOriginalSize, nextBucketNewSize, value, typeinfo, arrayElemCount, valuesArray, arrayAttributeArray);
}
return nextBucketImpl;
}
inline void PathToAttributesMap::fillAttributeInternal(BucketImpl& bucketImpl, const AttrName& name, const size_t startIndex, const size_t endIndex, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount, MirroredArray *const valuesArray, ArrayAttributeArray *const arrayAttributeArray)
{
CARB_ASSERT(valuesArray);
CARB_ASSERT(value);
CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray);
CARB_ASSERT(startIndex < valuesArray->count);
CARB_ASSERT(endIndex <= valuesArray->count);
if (typeinfo.isArray)
{
CARB_ASSERT(arrayAttributeArray);
const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
// Fill array sizes
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, name, aoa, bucketImpl, CpuWriteConfig());
CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount);
CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount);
for (size_t i = startIndex; i < endIndex; ++i)
{
reinterpret_cast<size_t*>(arrayAndDirtyIndices.array.ptr)[i] = arrayElemCount;
setArrayElementDirty(arrayAndDirtyIndices, i);
}
}
// Fill array values
{
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*valuesArray, name, aoa, bucketImpl, CpuWriteConfig());
CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount);
CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount);
for (size_t i = startIndex; i < endIndex; ++i)
{
uint8_t** dest = reinterpret_cast<uint8_t**>(arrayAndDirtyIndices.array.ptr) + arrayAndDirtyIndices.array.elementSize * i;
CARB_ASSERT(*dest);
memcpy(*dest, value, arrayElemCount * typeinfo.arrayElemSize); // assumes coherent and packed array value provided
setArrayElementDirty(arrayAndDirtyIndices, i);
}
}
}
else
{
const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo();
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*valuesArray, name, aoa, bucketImpl, CpuWriteConfig());
CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount);
CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount);
for (size_t i = startIndex; i < endIndex; ++i)
{
uint8_t* dest = arrayAndDirtyIndices.array.ptr + arrayAndDirtyIndices.array.elementSize * i;
memcpy(dest, value, typeinfo.size);
setArrayElementDirty(arrayAndDirtyIndices, i);
}
}
}
inline void PathToAttributesMap::addAttributeInternal(const PathC& path, const TokenC& attrNameC, const NameSuffix nameSuffix, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount)
{
APILOGGER("addAttributeInternal", apiLogEnabled, path, attrNameC);
BucketId bucketId;
ArrayIndex elemIndex;
std::tie(bucketId, elemIndex) = addAttributeGetBucketAndArrayIndex(path, attrNameC, nameSuffix, ctype);
BucketImpl *const bucketImpl = buckets.find(bucketId);
CARB_ASSERT(bucketImpl);
ArrayAttributeArray *arrayAttributeArray;
MirroredArray* valuesArray;
const AttrName attrName{ attrNameC, nameSuffix };
if (typeinfo.isArray)
{
bucketImpl->arrayAttributeArrays.find(attrName, &arrayAttributeArray);
CARB_ASSERT(arrayAttributeArray);
valuesArray = &arrayAttributeArray->values;
}
else
{
bucketImpl->scalarAttributeArrays.find(attrName, &valuesArray);
arrayAttributeArray = nullptr;
}
CARB_ASSERT(valuesArray);
CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray);
if (value)
{
fillAttributeInternal(*bucketImpl, attrName, elemIndex, elemIndex + 1, value, typeinfo, arrayElemCount, valuesArray, arrayAttributeArray);
}
#if CARB_ASSERT_ENABLED
const size_t elemCount = bucketImpl->elemToPath.size();
CARB_ASSERT(valuesArray->count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->cpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->elemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuElemCounts.count == elemCount);
CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuPtrs.count == elemCount);
#endif // #if CARB_ASSERT_ENABLED
}
inline PathToAttributesMap::PathToAttributesMap(const PlatformId& platformId)
: platform(carb::getCachedInterface<carb::flatcache::IPlatform>()->getMutable(platformId))
, pathToBucketElem(0, std::hash<PathId>(), std::equal_to<PathId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, buckets(platform)
, attrNameSetToBucketId()
, listenerIdToChangeTrackerConfig(0, ListenerIdHasher(), std::equal_to<ListenerId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, typeToInfo(0, std::hash<TypeC>(), std::equal_to<TypeC>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
, usdStageId()
, minimalPopulationDone(false)
{
// required types for arrays of arrays
Typeinfo* typeinfo;
typeToInfo.allocateEntry(PTAM_SIZE_TYPEC, &typeinfo);
*typeinfo = Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 };
typeToInfo.allocateEntry(PTAM_POINTER_TYPEC, &typeinfo);
*typeinfo = Typeinfo{ sizeof(PTAM_POINTER_TYPE), false, 0 };
}
inline PathToAttributesMap& PathToAttributesMap::operator=(const flatcache::PathToAttributesMap& other)
{
carb::profiler::ZoneId zoneId = CARB_PROFILE_BEGIN(1, "Clear buckets");
buckets.clear();
CARB_PROFILE_END(1, zoneId);
zoneId = CARB_PROFILE_BEGIN(1, "Copy pathToBucketElem");
pathToBucketElem.clear();
pathToBucketElem.reserve(other.pathToBucketElem.size());
other.pathToBucketElem.forEach([this](const PathId& key, const std::pair<BucketId, ArrayIndex> &otherValue) {
std::pair<BucketId, ArrayIndex>* value;
VALIDATE_TRUE(pathToBucketElem.allocateEntry(key, &value));
static_assert(std::is_copy_constructible<std::pair<BucketId, ArrayIndex>>::value, "Expected pathToBucketElem values to be copy-constructible");
new (value) std::pair<BucketId, ArrayIndex>(otherValue);
});
CARB_PROFILE_END(1, zoneId);
zoneId = CARB_PROFILE_BEGIN(1, "Copy scalar attributes");
buckets = other.buckets;
CARB_PROFILE_END(1, zoneId);
attrNameSetToBucketId = other.attrNameSetToBucketId;
typeToInfo = other.typeToInfo;
usdStageId = other.usdStageId;
minimalPopulationDone = other.minimalPopulationDone;
stageHierarchy = other.stageHierarchy;
zoneId = CARB_PROFILE_BEGIN(1, "Copy array attributes");
{
BucketId id{ 0 };
for (size_t i = 0; i < buckets.end(); ++i, ++id)
{
auto bucketPtr = buckets.find(id);
if (bucketPtr)
{
const BucketImpl& srcBucketImpl = *(other.buckets.find(id));
BucketImpl& destBucketImpl = *bucketPtr;
// Copy any array-valued attributes
bucketImplCopyArrays(destBucketImpl, id, srcBucketImpl, id);
}
}
}
CARB_PROFILE_END(1, zoneId);
return *this;
}
inline PathToAttributesMap::~PathToAttributesMap()
{
}
}
}
#include <carb/flatcache/GetArrayGPU.h>
// Enable the warnings we disabled when we included USD headers
#if defined(__GNUC__)
# pragma GCC diagnostic pop
# ifdef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
# define __DEPRECATED
# undef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS
# endif
#endif
| 267,444 | C | 38.574578 | 369 | 0.645073 |
omniverse-code/kit/fabric/include/carb/flatcache/HashMap.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <cstdint>
#include <cstdlib>
#include <carb/Defines.h>
#include <carb/flatcache/Defines.h>
#include <carb/flatcache/Intrinsics.h>
namespace carb {
namespace flatcache {
struct HashMapDefaultAlloc
{
inline void* operator()(const size_t bytes)
{
return std::malloc(bytes);
}
};
struct HashMapDefaultFree
{
inline void operator()(void *const ptr)
{
std::free(ptr);
}
};
// A hashmap implemented with the following decisions:
//
// * Memory is allocated in a single contiguous buffer so that find operations make linear cache line fetches as much
// as possible. This intends to make more easily predictable memory access patterns, and thus, easier hardware-level
// prefetch decisions. Similarly, whole-map iteration benefits from the same cache-friendly access patterns.
//
// * Find operations intentionally are coded without range checks on the main loop. This is a tradeoff of speed for
// less error-detection in release builds. To help mitigate this, debug builds do track probe counts to validate
// we don't exceed the possible length of the hashmap.
//
// * No opinion codified on thread synchronization. It can be used safely if done carefully, but this is not a
// guarantee of the implementation.
//
// * No iterators provided. If some batch operation must occur, use the forEach() function provided, which should
// suffice. The forEach() method should provide similar performance without the added risk of callers being able to
// arbitrarily cache iterators outside the control of the HashMap, its routines, or even its lifetime.
//
// * Rejects (via static_assert) copy-construction and copy-assignment for mappings that are not trivially copyable.
//   This forces callers to implement those routines explicitly, favoring the clarity of reading intent in an explicit
//   implementation over the ambiguity of compiler selection. Trivially copyable mappings default to a memcpy to copy
//   data as fast as possible.
//
// * Implements allocateEntry() method, rather than insert()/emplace() methods mimicking std::unordered_map API. This
// does the minimum steps necessary to reserve address space for a key-value mapping, and provides the caller with
// the reserved buffer memory address for them to do their own construction, assignment, or initialization routines.
// This favors slightly more explicit coding patterns at the caller to force clarity of intent. In particular,
//   it makes more obvious the choice of the caller between construction vs assignment, and copy vs move semantics. It
//   also offers greater flexibility without sacrificing performance. (An illustrative usage sketch appears at the end
//   of this file.)
//
// * ~HashMap() and clear() operate differently depending on whether KeyT and ValueT are known to be
//   std::is_trivially_destructible. If they are, the fastest option is chosen: deallocate the memory without
//   iterating or explicitly destructing each entry. Otherwise, the implementation iterates and destructs each
//   object in place.
template<typename KeyT,
typename ValueT,
typename HashT = std::hash<KeyT>,
typename KeyEqualsT = std::equal_to<KeyT>,
typename AllocT = HashMapDefaultAlloc,
typename FreeT = HashMapDefaultFree>
struct HashMap
{
// These load-factor constants were not tuned exhaustively and could probably be improved.
static constexpr size_t LOAD_FACTOR_NUMERATOR = 3ull;
    static constexpr size_t LOAD_FACTOR_DENOMINATOR = 4ull;
static constexpr size_t MIN_INIT_CAPACITY = 4ull;
static_assert((MIN_INIT_CAPACITY & (MIN_INIT_CAPACITY - 1ull)) == 0, "MIN_INIT_CAPACITY must be a power of two!");
static constexpr bool KEY_IS_TRIVIALLY_DESTRUCTIBLE = std::is_trivially_destructible<KeyT>::value;
static constexpr bool VALUE_IS_TRIVIALLY_DESTRUCTIBLE = std::is_trivially_destructible<ValueT>::value;
enum EntryState : uint8_t
{
HASH_MAP_ENTRY_STATE_FREE,
HASH_MAP_ENTRY_STATE_OCCUPIED,
HASH_MAP_ENTRY_STATE_DELETED,
};
struct EntryT
{
EntryState state;
KeyT key;
ValueT value;
};
static constexpr size_t allocationSize( const size_t capacity );
static constexpr size_t loadThreshold( const size_t capacity );
static constexpr size_t inverseLoadThreshold( const size_t capacity );
static constexpr size_t capacityAdjustedForLoadThreshold( const size_t capacity );
HashMap( const size_t capacity = 0,
const HashT &hasher = HashT(),
const KeyEqualsT &keyEquals = KeyEqualsT(),
const AllocT &alloc_ = AllocT(),
const FreeT &free_ = FreeT() );
~HashMap();
HashMap(const HashMap& other);
HashMap& operator=(const HashMap& other);
HashMap(HashMap&& other) noexcept;
HashMap& operator=(HashMap&& other) noexcept;
inline friend void swap(HashMap& a, HashMap& b) noexcept
{
using std::swap;
swap(a.m_hasher, b.m_hasher);
swap(a.m_keyEquals, b.m_keyEquals);
swap(a.m_alloc, b.m_alloc);
swap(a.m_free, b.m_free);
swap(a.m_size, b.m_size);
swap(a.m_capacity, b.m_capacity);
swap(a.m_loadThreshold, b.m_loadThreshold);
swap(a.m_mask, b.m_mask);
swap(a.m_entries, b.m_entries);
}
void clear();
const void* data() const;
bool empty() const;
size_t size() const;
    size_t capacity() const;
void reserve(const size_t capacity);
bool find( const KeyT& key, ValueT** outValue );
bool find( const KeyT& key, const ValueT** outValue ) const;
bool exists( const KeyT& key ) const;
bool allocateEntry( KeyT&& key, ValueT** outValue );
bool allocateEntry( const KeyT& key, ValueT** outValue );
// Intended to be safe to call during forEach() as it does not invalidate iteration.
bool freeEntry( const KeyT& key );
void freeEntryByKeyAddress( const KeyT *const key );
void freeEntryByValueAddress( const ValueT *const value );
template<typename CallbackT>
inline void forEach( CallbackT callback );
template<typename CallbackT>
inline void forEach( CallbackT callback ) const;
size_t totalCollisionLength() const;
private:
size_t hashInternal( const KeyT& key ) const;
void resizeIfNecessary();
void resize( const size_t nextCapacity );
void freeEntryInternal( EntryT *const entry );
bool findFirstAvailable( const KeyT& key, EntryT** outEntry );
bool findExisting( const KeyT& key, EntryT** outEntry );
bool findExisting( const KeyT& key, const EntryT** outEntry ) const;
HashT m_hasher;
KeyEqualsT m_keyEquals;
AllocT m_alloc;
FreeT m_free;
size_t m_size;
size_t m_capacity;
size_t m_loadThreshold;
size_t m_mask;
EntryT* m_entries;
};
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocationSize( const size_t capacity )
{
return capacity * sizeof( EntryT );
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::loadThreshold( const size_t capacity )
{
    return (capacity * LOAD_FACTOR_NUMERATOR / LOAD_FACTOR_DENOMINATOR);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::inverseLoadThreshold( const size_t capacity )
{
    return (capacity * LOAD_FACTOR_DENOMINATOR / LOAD_FACTOR_NUMERATOR);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::capacityAdjustedForLoadThreshold( const size_t capacity )
{
    // reserves capacity to the nearest power of two that satisfies the load threshold for the requested capacity
size_t adjustedCapacity;
if (capacity && capacity >= loadThreshold(MIN_INIT_CAPACITY))
{
// +1 because we want capacity < loadThreshold(adjustedCapacity), not capacity <= loadThreshold(adjustedCapacity)
adjustedCapacity = 1ull << ( 64u - clz64( inverseLoadThreshold( capacity + 1 ) - 1ull ) );
}
else
{
adjustedCapacity = MIN_INIT_CAPACITY;
}
CARB_ASSERT(capacity < loadThreshold(adjustedCapacity));
CARB_ASSERT((adjustedCapacity & (adjustedCapacity - 1ull)) == 0);
return adjustedCapacity;
}
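// Worked example (illustrative note, not part of the original interface): with the 3/4 load factor
// above, requesting capacity 100 rounds up as follows:
//   inverseLoadThreshold(100 + 1) = 101 * 4 / 3 = 134   (integer division)
//   adjustedCapacity = 1ull << (64 - clz64(134 - 1)) = 1ull << 8 = 256
// loadThreshold(256) = 192 > 100, so 256 satisfies the requested capacity, whereas the next smaller
// power of two would not: loadThreshold(128) = 96 < 100.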
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(const size_t capacity,
const HashT &hasher,
const KeyEqualsT &keyEquals,
const AllocT &alloc_,
const FreeT &free_)
{
m_hasher = hasher;
m_keyEquals = keyEquals;
m_alloc = alloc_;
m_free = free_;
m_size = 0;
if (capacity)
{
const size_t adjustedCapacity = capacityAdjustedForLoadThreshold(capacity);
const size_t bufSize = allocationSize(adjustedCapacity);
m_capacity = adjustedCapacity;
m_loadThreshold = loadThreshold(adjustedCapacity);
m_mask = adjustedCapacity - 1ull;
m_entries = (EntryT*)m_alloc(bufSize);
memset(m_entries, 0, bufSize);
}
else
{
m_capacity = 0;
m_loadThreshold = 0;
m_mask = 0;
m_entries = nullptr;
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::~HashMap()
{
if ( m_entries )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE || !VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
size_t index = 0;
size_t visited = 0;
for ( ; index < m_capacity && visited < m_size; ++index)
{
EntryT *const entry = &m_entries[index];
if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->key.~KeyT();
}
if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->value.~ValueT();
}
CARB_ASSERT(visited < m_size);
++visited;
}
}
}
m_free(m_entries);
m_entries = nullptr;
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(const HashMap& other)
: m_hasher(other.m_hasher)
, m_keyEquals(other.m_keyEquals)
, m_alloc(other.m_alloc)
, m_free(other.m_free)
, m_size(other.m_size)
, m_capacity(other.m_capacity)
, m_loadThreshold(other.m_loadThreshold)
, m_mask(other.m_mask)
{
    static_assert(std::is_trivially_copyable<EntryT>::value, "Copying of HashMap is only supported for trivially copyable key-value mappings.");
const size_t bufSize = allocationSize(m_capacity);
m_entries = (EntryT*)m_alloc(bufSize);
memcpy(m_entries, other.m_entries, bufSize);
CARB_ASSERT(m_entries);
CARB_ASSERT(m_capacity);
CARB_ASSERT((m_capacity & (m_capacity - 1ull)) == 0); // assert m_capacity is power of two
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size < m_loadThreshold);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>& HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::operator=(const HashMap& other)
{
HashMap tmp(other);
swap(*this, tmp);
return *this;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(HashMap&& other) noexcept
: m_hasher(std::move(other.m_hasher))
, m_keyEquals(std::move(other.m_keyEquals))
, m_alloc(std::move(other.m_alloc))
, m_free(std::move(other.m_free))
, m_size(std::move(other.m_size))
, m_capacity(std::move(other.m_capacity))
, m_loadThreshold(std::move(other.m_loadThreshold))
, m_mask(std::move(other.m_mask))
, m_entries(std::move(other.m_entries))
{
other.m_entries = nullptr;
other.clear();
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>& HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::operator=(HashMap&& other) noexcept
{
HashMap tmp(std::move(other));
swap(*this, tmp);
return *this;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::clear()
{
if ( m_entries )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE || !VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
size_t index = 0;
size_t visited = 0;
for ( ; index < m_capacity && visited < m_size; ++index)
{
EntryT *const entry = &m_entries[index];
if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->key.~KeyT();
}
if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->value.~ValueT();
}
CARB_ASSERT(visited < m_size);
++visited;
}
entry->state = HASH_MAP_ENTRY_STATE_FREE;
}
}
else
{
static_assert(HASH_MAP_ENTRY_STATE_FREE == 0, "memset(0) requires HASH_MAP_ENTRY_STATE_FREE == 0");
memset(m_entries, 0, allocationSize(m_capacity));
}
}
m_size = 0;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline const void* HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::data() const
{
return m_entries;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::empty() const
{
return m_size == 0;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::size() const
{
return m_size;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::capacity() const
{
return m_capacity;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::reserve(const size_t capacity)
{
const size_t adjustedCapacity = capacityAdjustedForLoadThreshold(capacity);
if (m_capacity < adjustedCapacity)
{
resize(adjustedCapacity);
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::find( const KeyT& key, ValueT** outValue )
{
EntryT* existing;
if (findExisting( key, &existing) )
{
*outValue = &existing->value;
return true;
}
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::find( const KeyT& key, const ValueT** outValue ) const
{
const EntryT* existing;
if (findExisting( key, &existing) )
{
*outValue = &existing->value;
return true;
}
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::exists( const KeyT& key ) const
{
const EntryT* existing;
if (findExisting( key, &existing) )
{
return true;
}
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocateEntry( KeyT&& key, ValueT** outValue )
{
EntryT* availableEntry;
resizeIfNecessary();
const bool available = findFirstAvailable(key, &availableEntry);
CARB_ASSERT(availableEntry);
if (available)
{
new (&availableEntry->key) KeyT(std::move(key));
CARB_ASSERT(availableEntry->state != HASH_MAP_ENTRY_STATE_OCCUPIED);
availableEntry->state = HASH_MAP_ENTRY_STATE_OCCUPIED;
*outValue = &availableEntry->value;
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size + 1 > m_size);
++m_size;
return true;
}
*outValue = &availableEntry->value;
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocateEntry( const KeyT& key, ValueT** outValue )
{
EntryT* availableEntry;
resizeIfNecessary();
const bool available = findFirstAvailable(key, &availableEntry);
CARB_ASSERT(availableEntry);
if (available)
{
new (&availableEntry->key) KeyT(key);
CARB_ASSERT(availableEntry->state != HASH_MAP_ENTRY_STATE_OCCUPIED);
availableEntry->state = HASH_MAP_ENTRY_STATE_OCCUPIED;
*outValue = &availableEntry->value;
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size + 1 > m_size);
++m_size;
return true;
}
*outValue = &availableEntry->value;
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntry( const KeyT& key )
{
EntryT* existing;
if (findExisting(key, &existing))
{
freeEntryInternal(existing);
return true;
}
else
{
return false;
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryByKeyAddress( const KeyT *const key )
{
static_assert(!std::is_polymorphic<EntryT>::value, "Unable to freeEntry by key address!");
constexpr size_t OFFSET = offsetof(EntryT, key);
EntryT *const entry = (EntryT*)(((uintptr_t)key) - OFFSET);
freeEntryInternal(entry);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryByValueAddress(const ValueT *const value)
{
static_assert(!std::is_polymorphic<EntryT>::value, "Unable to freeEntry by value address!");
constexpr size_t OFFSET = offsetof(EntryT, value);
EntryT *const entry = (EntryT*)(((uintptr_t)value) - OFFSET);
freeEntryInternal(entry);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::hashInternal( const KeyT& key ) const
{
size_t hash = m_hasher(key);
#define HASHMAP_DEFENSIVE_SALT IN_USE
#if USING( HASHMAP_DEFENSIVE_SALT )
// Apply a defensive salt to the user-calculated hash value. It is unsafe to assume user-provided hashes are good.
//
// Kit historically had a problem where std::hash<PathC> caused terrible distributions inside of space-restricted
// hashmaps. This was primarly because the hash values returned had zero entropy in the lower 8 bits. The higher
// bits had excellent entropy, though. It is trivial to improve std::hash<PathC> by doing (oldHashValue >> 8).
// In other words, tossing the bits with zero entropy. This will produce perfectly unique hash value output for
// every PathC input. However, using this directly in a hash map is still not ideal because, while the hash function
// has a guarantee on uniqueness, it does not necessarily lend to good distributions in a hash table. Two hash
    // values that are multiples of each other will naturally collide in any space-restricted hashmap.
// (Which, realistically, is all real hash maps since hardware memory is not infinite.) Applying a little salt on
// top of the hash value fixes this distribution problem.
//
// This also provides general safety against poorly implemented user-provided hash functions that don't generate
// unique or well distributed values.
//
// Known problematic data sets:
// - PathC (interned SdfPaths)
// - TokenC (interned TfTokens)
//
// Salt techniques tried:
// - H3_XX64 (xxhash):
// - good distribution
// - too slow
// - H3_XX64 (xxhash) with custom seeds:
// - no seed performed better than the xxhash default secret
// - Custom H3_XX64 implementation specialized for aligned 64-bit keys:
    //   - mathematically identical distribution to H3_XX64
// - 2x faster performance than official implementation
// - Multiply by a prime
// - best distribution so far
// - best speed so far (3x faster than custom H3_XX64)
//
// TODO: A fun intern experiment would be to investigate our various omniverse hash functions for distribution and
// speed. And also investigate alternative defensive salting techniques.
return hash * 48271ull;
#else // #if USING( HASHMAP_DEFENSIVE_SALT )
return hash;
#endif // #if USING( HASHMAP_DEFENSIVE_SALT )
#undef HASHMAP_DEFENSIVE_SALT
}
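// Illustrative note (not part of the original header): with a power-of-two table the bucket index
// is (hash & m_mask), so adjacent or stride-patterned hashes land in adjacent or periodic buckets,
// degrading linear probing. Multiplying by the odd prime 48271 is a bijection on the low bits that
// scatters such patterns. For example, with capacity 256 (m_mask == 0xFF), raw hashes 1, 2, 3 would
// probe the adjacent buckets 1, 2, 3; after the salt they map to
// (1 * 48271) & 0xFF == 143, (2 * 48271) & 0xFF == 30, and (3 * 48271) & 0xFF == 173.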
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::resizeIfNecessary()
{
if (m_size >= m_loadThreshold)
{
CARB_ASSERT(!m_capacity || m_capacity * 2 > m_capacity);
resize(m_capacity ? m_capacity * 2 : MIN_INIT_CAPACITY);
}
else if (!m_entries)
{
const size_t bufSize = allocationSize(m_capacity);
m_entries = (EntryT*)m_alloc(bufSize);
memset(m_entries, 0, bufSize);
}
CARB_ASSERT(m_entries);
CARB_ASSERT(m_capacity);
CARB_ASSERT((m_capacity & (m_capacity - 1)) == 0);
CARB_ASSERT(m_size < m_capacity);
CARB_ASSERT(m_size < m_loadThreshold);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::resize(const size_t nextCapacity)
{
CARB_ASSERT(m_size < loadThreshold(nextCapacity));
CARB_ASSERT((nextCapacity & (nextCapacity - 1)) == 0);
HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT> tmp(nextCapacity, m_hasher, m_keyEquals, m_alloc, m_free );
size_t index = 0;
size_t visited = 0;
for ( ; index < m_capacity && visited < m_size; ++index)
{
EntryT *const entry = &m_entries[index];
if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
ValueT *tmpV;
tmp.allocateEntry(std::move(entry->key), &tmpV);
new (tmpV) ValueT(std::move(entry->value));
CARB_ASSERT(visited < m_size);
++visited;
}
}
CARB_ASSERT(m_size == tmp.m_size);
using std::swap;
swap(m_entries, tmp.m_entries);
swap(m_size, tmp.m_size);
swap(m_capacity, tmp.m_capacity);
swap(m_loadThreshold, tmp.m_loadThreshold);
swap(m_mask, tmp.m_mask);
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryInternal( EntryT *const entry )
{
CARB_ASSERT(entry);
CARB_ASSERT(entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED);
if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->key.~KeyT();
}
if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
{
entry->value.~ValueT();
}
entry->state = HASH_MAP_ENTRY_STATE_DELETED;
CARB_ASSERT(m_size);
CARB_ASSERT(m_size - 1 < m_size);
--m_size;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
template<typename CallbackT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::forEach( CallbackT callback )
{
size_t index = 0;
size_t visited = 0;
const size_t size_captured = m_size;
for ( ; index < m_capacity && visited < size_captured; ++index)
{
if (m_entries[index].state == HASH_MAP_ENTRY_STATE_OCCUPIED)
{
callback(m_entries[index].key, m_entries[index].value);
CARB_ASSERT(visited < size_captured);
++visited;
}
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
template<typename CallbackT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::forEach(CallbackT callback) const
{
size_t index = 0;
size_t visited = 0;
const size_t size_captured = m_size;
for (; index < m_capacity && visited < size_captured; ++index)
{
if (m_entries[index].state == HASH_MAP_ENTRY_STATE_OCCUPIED)
{
callback(m_entries[index].key, m_entries[index].value);
CARB_ASSERT(visited < size_captured);
++visited;
}
}
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::totalCollisionLength() const
{
size_t len = 0;
if ( m_entries )
{
size_t index = 0;
size_t visited = 0;
const size_t size_captured = m_size;
for (; index < m_capacity && visited < size_captured; ++index)
{
const EntryT *const probe = &m_entries[index];
if (probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED)
{
const EntryT *const natural = &m_entries[hashInternal(probe->key) & m_mask];
len += (size_t)((natural <= probe) ? (probe - natural) : ( ( probe + m_capacity ) - natural) );
CARB_ASSERT(visited < size_captured);
++visited;
}
}
}
return len;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findFirstAvailable( const KeyT& key, EntryT** outEntry )
{
EntryT* probe;
size_t probeIdx;
    // This may resize opportunistically even when the key already exists (in which case nothing new
    // is inserted), but that edge case occurs at most once per resize, and resizing up front avoids an extra search.
resizeIfNecessary();
#if USING( ASSERTS )
size_t probes = 0;
#endif // #if USING( ASSERTS )
probeIdx = hashInternal(key) & m_mask;
CARB_ASSERT(m_size < m_capacity); // otherwise we infinite loop
while(1)
{
CARB_ASSERT( probeIdx < m_capacity );
probe = &m_entries[probeIdx];
if ( probe->state == HASH_MAP_ENTRY_STATE_FREE )
{
*outEntry = probe;
return true;
}
else if ( probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if ( m_keyEquals(probe->key, key) )
{
*outEntry = probe;
return false;
}
}
else if ( probe->state == HASH_MAP_ENTRY_STATE_DELETED )
{
*outEntry = probe;
return true;
}
probeIdx = ( probeIdx + 1 ) & m_mask;
#if USING( ASSERTS )
++probes;
CARB_ASSERT(probes < m_capacity);
#endif // #if USING( ASSERTS )
}
CARB_ASSERT(false && "unreachable code");
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findExisting( const KeyT& key, EntryT** outEntry )
{
if (!m_size)
{
return false;
}
EntryT* probe;
size_t probeIdx;
#if USING( ASSERTS )
size_t probes = 0;
#endif // #if USING( ASSERTS )
probeIdx = hashInternal(key) & m_mask;
CARB_ASSERT(m_size < m_capacity); // otherwise we infinite loop
while(1)
{
CARB_ASSERT( probeIdx < m_capacity );
probe = &m_entries[probeIdx];
if ( probe->state == HASH_MAP_ENTRY_STATE_FREE )
{
return false;
}
else if ( probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
{
if ( m_keyEquals(probe->key, key) )
{
*outEntry = probe;
return true;
}
}
else
{
// skip
CARB_ASSERT( probe->state == HASH_MAP_ENTRY_STATE_DELETED );
}
probeIdx = ( probeIdx + 1 ) & m_mask;
#if USING( ASSERTS )
++probes;
CARB_ASSERT(probes < m_capacity);
#endif // #if USING( ASSERTS )
}
CARB_ASSERT(false && "unreachable code");
return false;
}
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findExisting( const KeyT& key, const EntryT** outEntry ) const
{
return const_cast<HashMap*>(this)->findExisting( key, const_cast< EntryT** >(outEntry) );
}
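// Illustrative usage sketch (added note, not part of the original header; the function and the
// concrete key/value types are hypothetical). It demonstrates the allocateEntry() pattern described
// at the top of this file: the map reserves a slot and the caller constructs the value in place.
inline void hashMapUsageSketch()
{
    HashMap<uint64_t, double> map;

    // Insert: allocateEntry() returns true when the key was not already present and hands back the
    // address of the uninitialized value slot; the caller chooses construction vs assignment.
    double* slot;
    if (map.allocateEntry(42ull, &slot))
    {
        new (slot) double(3.14);
    }

    // Lookup: find() writes the address of the stored value on success.
    double* found;
    if (map.find(42ull, &found))
    {
        *found += 1.0;
    }

    // Traversal: forEach() is the only iteration mechanism. Calling freeEntry() from the callback
    // is safe because deletion tombstones the slot without rehashing.
    map.forEach([&map](const uint64_t& key, double& value) {
        if (value > 4.0)
        {
            map.freeEntry(key);
        }
    });
}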
} // namespace flatcache
} // namespace carb
| 31,028 | C | 35.634002 | 162 | 0.655182 |
omniverse-code/kit/fabric/include/carb/flatcache/Enums.h | // Copyright (c) 2021-2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
namespace carb {
namespace flatcache {
/**
* @enum PtrToPtrKind
*
* @details When getting an array-valued attribute for GPU access, you can
* optionally use this enum to ask for a GPU pointer to the GPU data
* pointer (eGpuPtrToGpuPtr), or a CPU pointer to the GPU data
* pointer (eCpuPtrToGpuPtr).
* The advantage of using eCpuPtrToGpuPtr is that you can dereference
* the returned pointer on the CPU, and pass the GPU data pointer as
* a CUDA kernel parameter.
* The advantage of using eGpuPtrToGpuPtr is that it makes it easier
* to extend kernels to operate on arrays of arrays later. Also it
* allows us to support allocation and resizing of array-valued
* attributes on the GPU in the future.
*
* PtrToPtrKind is not a parameter of methods returning arrays of
* arrays, for example getArrayGPU(). This is because there is no way
* to pass a variable length array of GPU pointers to a kernel using
* its CPU launch parameters. So GPU arrays of arrays always have to
* be passed to kernels as a GPU pointer to an array of GPU pointers.
*/
enum class PtrToPtrKind
{
eNotApplicable = 0,
eGpuPtrToGpuPtr = 0, // eGpuPtrToGpuPtr == eNotApplicable for backward compatibility
eCpuPtrToGpuPtr = 1
};
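// Illustrative sketch (added note; everything here other than PtrToPtrKind itself is hypothetical,
// since the attribute-fetching API is declared elsewhere). With eCpuPtrToGpuPtr the returned
// pointer lives in CPU memory, so the GPU data pointer can be read on the host and passed as an
// ordinary kernel parameter:
//
//     float** cpuPtrToGpuData = /* fetch an attribute with PtrToPtrKind::eCpuPtrToGpuPtr */;
//     myKernel<<<blocks, threads>>>(*cpuPtrToGpuData); // dereference on the CPU
//
// With eGpuPtrToGpuPtr the pointer itself resides in GPU memory, so it must instead be passed to
// the kernel and dereferenced on the device.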
} // namespace flatcache
} // namespace carb
| 1,865 | C | 41.40909 | 88 | 0.70563 |