file_path
stringlengths
32
153
content
stringlengths
0
3.14M
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/__init__.py
# Public entry point for the audio player window extension package:
# re-export everything from the implementation module.
from .audio_player_window import *
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/audio_player_window.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""A dockable "Audio Player" window.

Lets the user pick a sound file, play/pause/stop it, scrub a waveform-backed
timeline slider, and exposes an `open_window_and_play` hook that the content
browser's "Play Audio" context menu uses.
"""
import carb.settings
import carb.dictionary
import omni.audioplayer
import omni.kit.ui
import omni.ui
import threading
import time
import re
import asyncio
import enum
from typing import Callable
from omni.kit.window.filepicker import FilePickerDialog

PERSISTENT_SETTINGS_PREFIX = "/persistent"


class EndReason(enum.Enum):
    """Why the most recent playback ended (distinguishes real stops from
    seek/replace events, which also fire the player's "ended" callback)."""

    # BUGFIX: the original members had trailing commas ("FINISHED = 0,"),
    # which made every value a 1-tuple instead of an int.
    # sound finished naturally
    FINISHED = 0
    # sound was explicitly stopped
    STOPPED = 1
    # seeked to a new location in the sound (causes an end callback)
    SEEK = 2
    # the previous sound ended because another one is being played
    QUEUED_NEW_SOUND = 3


class AudioPlayerWindowExtension(omni.ext.IExt):
    """Audio Player Window Extension"""

    class FieldModel(omni.ui.AbstractValueModel):
        """String model for the file-path field; notifies on end of edit."""

        def __init__(self, end_edit_callback):
            super().__init__()
            self._end_edit_callback = end_edit_callback
            self._value = ""

        def get_value_as_string(self):
            return self._value

        def begin_edit(self):
            pass

        def set_value(self, value):
            self._value = value
            self._value_changed()

        def end_edit(self):
            self._end_edit_callback(self._value)

    class SliderModel(omni.ui.AbstractValueModel):
        """Float model for the timeline slider; forwards live updates and
        end-of-drag events to separate callbacks."""

        def __init__(self, update_callback, end_edit_callback):
            super().__init__()
            self._update_callback = update_callback
            self._end_edit_callback = end_edit_callback
            self._value = 0

        def get_value_as_int(self):  # pragma: no cover
            return int(self._value)

        def get_value_as_float(self):  # pragma: no cover
            return float(self._value)

        def begin_edit(self):  # pragma: no cover
            pass

        def set_value(self, value):  # pragma: no cover
            self._value = value
            self._value_changed()
            self._update_callback(self._value)

        def end_edit(self):  # pragma: no cover
            self._end_edit_callback(self._value)

    def _on_file_pick(self, dialog: FilePickerDialog, filename: str, dirname: str):  # pragma: no cover
        """Apply handler for the file picker: push the chosen path into the field."""
        path = ""
        if dirname:
            # BUGFIX: join the directory with the picked file name (the
            # original built a literal "(unknown)" path component).
            path = f"{dirname}/{filename}"
        elif filename:
            path = filename
        dialog.hide()
        self._file_field.model.set_value(path)
        # this has to be called manually because set_value doesn't do it
        self._file_field_end_edit(path)

    def _choose_file_clicked(self):  # pragma: no cover
        """Open a file-picker dialog for selecting the sound file."""
        dialog = FilePickerDialog(
            "Select File",
            apply_button_label="Select",
            click_apply_handler=lambda filename, dirname: self._on_file_pick(dialog, filename, dirname),
        )
        dialog.show()

    def _set_pause_button(self):  # pragma: no cover
        # swap the play/pause button's glyph to "pause"
        self._play_button.set_style({"image_url": "resources/glyphs/timeline_pause.svg"})

    def _set_play_button(self):  # pragma: no cover
        # swap the play/pause button's glyph to "play"
        self._play_button.set_style({"image_url": "resources/glyphs/timeline_play.svg"})

    def _timeline_str(self, time):  # pragma: no cover
        """Format a time in seconds as M:SS, or H:MM:SS past one hour."""
        sec = ":{:02.0f}".format(time % 60)
        if time > 60.0 * 60.0:
            return "{:1.0f}".format(time // (60 * 60)) + ":{:02.0f}".format((time // 60) % 60) + sec
        else:
            return "{:1.0f}".format(time // 60) + sec

    def _timeline_ticker(self):  # pragma: no cover
        """Periodic (0.25 s) UI refresh of the play cursor while playing."""
        if not self._playing:
            return
        time = self._player.get_play_cursor()
        self._timeline_cursor_label.text = self._timeline_str(time)
        self._timeline_slider.model.set_value(time * self._timeline_slider_scale)
        # if the window was closed, stop the player
        if not self._window.visible:
            self._end_reason = EndReason.STOPPED
            self._player.stop_sound()
        # BUGFIX: Timer.start() returns None, so the original
        # "self._ticker = Timer(...).start()" stored None and on_shutdown
        # could never cancel the timer.  Keep the Timer object instead.
        self._ticker = threading.Timer(0.25, self._timeline_ticker)
        self._ticker.start()

    def _loading_ticker(self):
        """Animate a "Loading..." label every 0.25 s while a file loads."""
        labels = {0: "Loading", 1: "Loading.", 2: "Loading..", 3: "Loading..."}
        if not self._loading:
            self._loading_label.text = ""
            return
        self._loading_label.text = labels[self._loading_counter % 4]
        self._loading_counter += 1
        # BUGFIX: keep the Timer object (Timer.start() returns None) so it
        # can be cancelled on shutdown.
        self._loading_timer = threading.Timer(0.25, self._loading_ticker)
        self._loading_timer.start()

    def _play_sound(self, time):
        """Start playback of the file named in the path field at `time` seconds."""
        self._loading = True
        self._player.play_sound(self._file_field.model.get_value_as_string(), time)

    def _close_error_window(self):  # pragma: no cover
        self._error_window.visible = False

    def _set_play_cursor(self, time):  # pragma: no cover
        # seeking fires an "ended" callback; flag it so _play_finished ignores it
        self._end_reason = EndReason.SEEK
        self._player.set_play_cursor(time)

    def _file_loaded(self, success):  # pragma: no cover
        """Player callback: a load finished.  On failure show an error window;
        on success draw the waveform (for new files) and start the ticker."""
        self._loading = False
        if not success:
            self._playing = False
            self._set_play_button()
            error_text = "Loading failed"
            file_name = self._file_field.model.get_value_as_string()
            # BUGFIX: the extension dot was unescaped in the original pattern
            # ("^.*.(m4a|aac)$"), which also matched names like "xm4a".
            if re.search(r"^.*\.(m4a|aac)$", file_name):
                error_text = (
                    f"Failed to load file '{file_name}' codec not supported - only Vorbis, FLAC and WAVE are supported"
                )
            else:
                error_text = f"Failed to load file '{file_name}' codec not supported (only Vorbis, FLAC, MP3 and WAVE are supported), file does not exist or the file is corrupted"
            self._error_window = omni.ui.Window(
                "Audio Player Error", width=400, height=0, flags=omni.ui.WINDOW_FLAGS_NO_DOCKING
            )
            with self._error_window.frame:
                with omni.ui.VStack():
                    with omni.ui.HStack():
                        omni.ui.Spacer()
                        self._error_window_label = omni.ui.Label(
                            error_text, word_wrap=True, width=380, alignment=omni.ui.Alignment.CENTER
                        )
                        omni.ui.Spacer()
                    with omni.ui.HStack():
                        omni.ui.Spacer()
                        self._error_window_ok_button = omni.ui.Button(
                            width=64, height=32, clicked_fn=self._close_error_window, text="ok"
                        )
                        omni.ui.Spacer()
            # clear the waveform display
            self._waveform_image_provider.set_bytes_data([0, 0, 0, 0], [1, 1])
            return
        if self._new_file:
            width = 2048
            height = 64
            raw_image = self._player.draw_waveform(width, height, [0.89, 0.54, 0.14, 1.0], [0.0, 0.0, 0.0, 0.0])
            self._waveform_image_provider.set_bytes_data(raw_image, [width, height])
            self._new_file = False
        self._timeline_end_label.text = self._timeline_str(self._player.get_sound_length())
        self._sound_length = self._player.get_sound_length()
        self._timeline_slider_scale = 1.0 / self._sound_length
        # set the timeline ticker going
        self._timeline_ticker()
        # set this back to default
        self._end_reason = EndReason.FINISHED

    def _play_finished(self):  # pragma: no cover
        """Player callback: playback ended.  Ignore seek/replace pseudo-ends."""
        if self._end_reason != EndReason.SEEK and self._end_reason != EndReason.QUEUED_NEW_SOUND:
            self._playing = False
            # set the slider to finished
            self._timeline_cursor_label.text = self._timeline_str(0)
            self._timeline_slider.model.set_value(0.0)
            if self._end_reason == EndReason.FINISHED or self._end_reason == EndReason.STOPPED:
                self._set_play_button()
            if self._end_reason == EndReason.FINISHED and self._settings.get_as_bool(
                PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop"
            ):
                self._window.visible = False

    def _play_clicked(self):  # pragma: no cover
        """Play/pause button: toggle pause while playing, otherwise start playback
        from the slider position."""
        if self._loading:
            return
        if self._playing:
            if self._paused:
                self._player.unpause_sound()
                self._set_pause_button()
                self._paused = False
            else:
                self._player.pause_sound()
                self._set_play_button()
                self._paused = True
            return
        self._playing = True
        self._paused = False
        self._load_result_label.text = ""
        self._loading_ticker()
        self._set_pause_button()
        self._play_sound(self._timeline_slider.model.get_value_as_float() * self._sound_length)

    def _file_field_end_edit(self, value):
        """Path field committed: stop any current sound and load the new file."""
        # BUGFIX: stop before setting the loading flag — _stop_clicked() is a
        # no-op while self._loading is True, so the original ordering never
        # actually stopped the currently playing sound.
        self._stop_clicked()
        self._loading = True
        self._new_file = True
        self._load_result_label.text = ""
        self._loading_ticker()
        self._player.load_sound(self._file_field.model.get_value_as_string())

    def _stop_clicked(self):  # pragma: no cover
        """Stop button: halt playback and reset play/pause state."""
        if self._loading:
            return
        self._end_reason = EndReason.STOPPED
        self._player.stop_sound()
        self._playing = False
        self._paused = False

    def _slider_end_edit(self, value):  # pragma: no cover
        """Slider released: seek to the chosen fraction of the sound."""
        if self._loading:
            return
        if not self._playing:
            return
        self._set_play_cursor(value * self._sound_length)

    def _slider_changed(self, value):  # pragma: no cover
        # while idle, preview the would-be cursor time as the slider moves
        if not self._playing and not self._loading:
            self._timeline_cursor_label.text = self._timeline_str(value * self._sound_length)

    def open_window(self):
        """
        Make the window become visible

        Args:
            No arguments

        Returns:
            No return value
        """
        self._window.visible = True

    def open_window_and_play(self, path):  # pragma: no cover
        """
        Make the window become visible then begin playing a file

        Args:
            path: The file to begin playing

        Returns:
            No return value
        """
        self._playing = True
        self._loading = True
        self._paused = False
        self._new_file = True
        self._window.visible = True
        self._load_result_label.text = ""
        self._loading_ticker()
        self._set_pause_button()
        # any sound already playing will fire an "ended" callback when replaced
        self._end_reason = EndReason.QUEUED_NEW_SOUND
        self._file_field.model.set_value(path)
        self._play_sound(0.0)

    def _menu_callback(self, a, b):
        # "Window/Audio Player" menu item: toggle window visibility
        self._window.visible = not self._window.visible

    def _on_menu_click(self, menu, value):  # pragma: no cover
        """Content-window context menu: play the selected icon's file."""
        if self._content_window is None:
            return
        protocol = self._content_window.get_selected_icon_protocol()
        path = self._content_window.get_selected_icon_path()
        if not path.startswith(protocol):
            path = protocol + path
        self.open_window_and_play(path)

    def _on_menu_check(self, url):
        """Return True when `url` has a recognized audio-file extension."""
        return re.search(r"^.*\.(wav|wave|ogg|oga|flac|fla|mp3|m4a|spx|opus|adpcm)$", url) is not None

    def _on_browser_click(self, menu, value):  # pragma: no cover
        """Content-browser context menu: play the clicked item (`value` is its URL)."""
        if self._content_browser is None:
            return
        self.open_window_and_play(value)

    def _on_content_browser_load(self):  # pragma: no cover
        """Register the "Play Audio" context-menu entry with the content browser."""
        import omni.kit.window.content_browser

        self._content_browser = omni.kit.window.content_browser.get_content_window()
        if self._content_browser is not None:
            self._content_browser_entry = self._content_browser.add_context_menu(
                "Play Audio", "audio_play.svg", self._on_browser_click, self._on_menu_check
            )

    def _on_content_browser_unload(self):  # pragma: no cover
        """Remove the "Play Audio" entry and drop the content-browser reference."""
        if self._content_browser is not None:
            self._content_browser.delete_context_menu("Play Audio")
            self._content_browser_entry = None
            self._content_browser = None

    def _on_player_event(self, event):
        """Dispatch player event-stream events to the load/finish handlers."""
        if event.type == int(omni.audioplayer.CallbackType.LOADED):
            success = event.payload["success"]
            self._file_loaded(success)
        elif event.type == int(omni.audioplayer.CallbackType.ENDED):
            self._play_finished()
        else:
            print("unrecognized type " + str(event.type))

    def on_startup(self):
        """Create the player, build the window UI, and hook up settings/menus."""
        self._content_browser = None
        # BUGFIX: _on_menu_click reads self._content_window, which was never
        # initialized and would raise AttributeError.
        self._content_window = None
        self._hooks = []
        manager = omni.kit.app.get_app().get_extension_manager()
        # current content window
        self._hooks.append(
            manager.subscribe_to_extension_enable(
                on_enable_fn=lambda _: self._on_content_browser_load(),
                on_disable_fn=lambda _: self._on_content_browser_unload(),
                ext_name="omni.kit.window.content_browser",
                hook_name="omni.kit.window.audioplayer omni.kit.window.content_browser listener",
            )
        )
        self._loading_counter = 0
        self._ticker = None
        self._loading_timer = None
        self._loading = False
        self._end_reason = EndReason.FINISHED
        self._new_file = True
        self._sound_length = 0
        self._timeline_slider_scale = 0
        self._file = ""
        self._playing = False
        self._paused = False
        self._player = omni.audioplayer.create_audio_player()
        self._sub = self._player.get_event_stream().create_subscription_to_pop(self._on_player_event)
        self._window = omni.ui.Window("Audio Player", width=600, height=200)
        self._settings = carb.settings.get_settings()
        self._settings.set_default_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", False)
        with self._window.frame:
            with omni.ui.VStack(height=0, spacing=8):
                # file dialogue
                with omni.ui.HStack():
                    omni.ui.Button(
                        width=32,
                        height=32,
                        clicked_fn=self._choose_file_clicked,
                        style={"image_url": "resources/glyphs/folder.svg"},
                    )
                    self._file_field_model = AudioPlayerWindowExtension.FieldModel(self._file_field_end_edit)
                    self._file_field = omni.ui.StringField(self._file_field_model, height=32)
                # timeline slider
                with omni.ui.HStack(height=64):
                    self._timeline_cursor_label = omni.ui.Label("0:00", width=25)
                    omni.ui.Label(" / ", width=10)
                    self._timeline_end_label = omni.ui.Label("0:00", width=25)
                    self._timeline_slider_model = AudioPlayerWindowExtension.SliderModel(
                        self._slider_changed, self._slider_end_edit
                    )
                    with omni.ui.ZStack():
                        self._waveform_image_provider = omni.ui.ByteImageProvider()
                        self._waveform_image = omni.ui.ImageWithProvider(
                            self._waveform_image_provider,
                            width=omni.ui.Percent(100),
                            height=omni.ui.Percent(100),
                            fill_policy=omni.ui.IwpFillPolicy.IWP_STRETCH,
                        )
                        with omni.ui.VStack():
                            omni.ui.Spacer()
                            # transparent slider overlaid on the waveform image
                            self._timeline_slider = omni.ui.FloatSlider(
                                self._timeline_slider_model,
                                height=0,
                                style={
                                    "color": 0x00FFFFFF,
                                    "background_color": 0x00000000,
                                    "draw_mode": omni.ui.SliderDrawMode.HANDLE,
                                    "font_size": 22,
                                },
                            )
                            omni.ui.Spacer()
                # buttons
                with omni.ui.HStack():
                    with omni.ui.ZStack():
                        omni.ui.Spacer()
                        self._load_result_label = omni.ui.Label(
                            "", alignment=omni.ui.Alignment.CENTER, style={"color": 0xFF0000FF}
                        )
                    self._play_button = omni.ui.Button(
                        width=32,
                        height=32,
                        clicked_fn=self._play_clicked,
                        style={"image_url": "resources/glyphs/timeline_play.svg"},
                    )
                    omni.ui.Button(
                        width=32,
                        height=32,
                        clicked_fn=self._stop_clicked,
                        style={"image_url": "resources/glyphs/timeline_stop.svg"},
                    )
                    with omni.ui.ZStack():
                        omni.ui.Spacer()
                        self._loading_label = omni.ui.Label("", alignment=omni.ui.Alignment.CENTER)
                    with omni.ui.HStack(alignment=omni.ui.Alignment.LEFT, width=100):
                        omni.ui.Label("Close on Stop", alignment=omni.ui.Alignment.LEFT)
                        omni.ui.Spacer()
                        self._auto_close_on_stop = omni.ui.CheckBox(alignment=omni.ui.Alignment.LEFT)
                    omni.ui.Spacer()
        # keep the checkbox and the persistent setting in sync (both ways)
        self._auto_close_on_stop.model.set_value(
            self._settings.get_as_bool(PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop")
        )
        self._dict = carb.dictionary.get_dictionary()
        self._auto_close_on_stop.model.add_value_changed_fn(
            lambda a, b=self._settings: b.set_bool(
                PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", a.get_value_as_bool()
            )
        )

        def on_change(item, event_type):  # pragma: no cover
            self._auto_close_on_stop.model.set_value(self._dict.get(item))

        self._subscription = self._settings.subscribe_to_node_change_events(
            PERSISTENT_SETTINGS_PREFIX + "/audio/context/closeAudioPlayerOnStop", on_change
        )
        # add a callback to open the window
        # FIXME: disabled until the bugs are worked out
        self._menuEntry = omni.kit.ui.get_editor_menu().add_item("Window/Audio Player", self._menu_callback)
        self._window.visible = False

    def on_shutdown(self):  # pragma: no cover
        """Stop playback, cancel timers, and release all UI/player resources."""
        self._end_reason = EndReason.STOPPED
        self._player.stop_sound()
        if self._ticker is not None:
            self._ticker.cancel()
        if self._loading_timer is not None:
            self._loading_timer.cancel()
        self._settings.unsubscribe_to_change_events(self._subscription)
        self._subscription = None
        # run the unload function to avoid breaking the extension when it reloads
        self._on_content_browser_unload()
        # remove the subscription before the player to avoid events with a dead player
        self._sub = None
        self._player = None
        self._window = None
        self._menuEntry = None
        self._content_window_entry = None
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/tests/__init__.py
# Expose the extension's test cases to the Kit test runner.
from .test_audio_player import *  # pragma: no cover
omniverse-code/kit/exts/omni.kit.window.audioplayer/omni/kit/window/audioplayer/tests/test_audio_player.py
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
"""UI-automation tests for the Audio Player window (golden-image based)."""
import omni.kit.app
import omni.kit.test
import omni.kit.ui_test
import omni.ui as ui
import omni.usd
import omni.timeline
import carb.tokens
import omni.usd.audio
from omni.ui.tests.test_base import OmniUiTest
import pathlib
import asyncio;


class TestAudioPlayerWindow(OmniUiTest):  # pragma: no cover
    async def _dock_window(self):
        # dock at a fixed size so golden-image comparisons are deterministic
        await self.docked_test_window(
            window=self._win.window,
            width=600,
            height=200)

    def _dump_ui_tree(self, root):
        """Print the widget tree under `root` (debug aid for failing tests)."""
        print("DUMP UI TREE START")
        #windows = omni.ui.Workspace.get_windows()
        #children = [windows[0].frame]
        children = [root.frame]
        print(str(dir(root.frame)))

        def recurse(children, path=""):
            for c in children:
                name = path + "/" + type(c).__name__
                print(name)
                if isinstance(c, omni.ui.ComboBox):
                    print(str(dir(c)))
                recurse(omni.ui.Inspector.get_children(c), name)

        recurse(children)
        print("DUMP UI TREE END")

    async def setUp(self):
        """Open the Audio Player via the Window menu and grab its widgets."""
        await super().setUp()

        extension_path = carb.tokens.get_tokens_interface().resolve("${omni.kit.window.audioplayer}")
        self._test_path = pathlib.Path(extension_path).joinpath("data").joinpath("tests").absolute()
        self._golden_img_dir = self._test_path.joinpath("golden")

        # open the dropdown
        window_menu = omni.kit.ui_test.get_menubar().find_menu("Window")
        self.assertIsNotNone(window_menu)
        await window_menu.click()

        # click the audioplayer option to open it
        player_menu = omni.kit.ui_test.get_menubar().find_menu("Audio Player")
        self.assertIsNotNone(player_menu)
        await player_menu.click()

        self._win = omni.kit.ui_test.find("Audio Player")
        self.assertIsNotNone(self._win)

        # the file-path text box is the window's only StringField
        self._file_name_textbox = self._win.find("**/StringField[*]")
        self.assertIsNotNone(self._file_name_textbox)

    async def _test_just_opened(self):
        # freshly opened window must match the golden image
        await self._dock_window()
        await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_just_opened.png")

    async def _test_load_file(self):
        """Type a test file path, wait for the load, and compare the window."""
        await self._file_name_textbox.click()
        await self._file_name_textbox.input(str(self._test_path / "1hz.oga"))
        await asyncio.sleep(1.0)

        # delete the text in the textbox so we'll have something constant
        # for the image comparison
        await self._file_name_textbox.double_click()
        await omni.kit.ui_test.emulate_keyboard_press(carb.input.KeyboardInput.DEL)

        await self._dock_window()
        await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_open_file.png")

    async def test_all(self):
        # run sub-tests in a fixed order; they share the window opened in setUp
        await self._test_just_opened()
        await self._test_load_file()
        self._dump_ui_tree(self._win.window)
omniverse-code/kit/exts/omni.kit.property.geometry/PACKAGE-LICENSES/omni.kit.property.geometry-LICENSE.md
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in and to this software, related documentation and any modifications thereto. Any use, reproduction, disclosure or distribution of this software and related documentation without an express license agreement from NVIDIA CORPORATION is strictly prohibited.
omniverse-code/kit/exts/omni.kit.property.geometry/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.2.2"
category = "Internal"
feature = true

# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]

# The title and description fields are primarily for displaying extension info in UI
title = "Geometry Property Widget"
description="View and Edit Geometry Property Values"

# URL of the extension source repository.
repository = ""

# Preview image. Folder named "data" automatically goes in git lfs (see .gitattributes file).
preview_image = "data/preview.png"

# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"

# Keywords for the extension
keywords = ["kit", "usd", "property", "geometry"]

# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"

[dependencies]
"omni.usd" = {}
"omni.ui" = {}
"omni.kit.window.property" = {}
"omni.kit.property.usd" = {}

[[python.module]]
name = "omni.kit.property.geometry"

[[test]]
args = [
    "--/app/window/dpiScaleOverride=1.0",
    "--/app/window/scaleToMonitor=false",
    "--no-window"
]
dependencies = [
    "omni.kit.renderer.capture",
    "omni.kit.mainwindow",
    "omni.kit.ui_test",
    "omni.kit.test_suite.helpers"
]
stdoutFailPatterns.exclude = [
    "*Failed to acquire interface*while unloading all plugins*"
]
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/__init__.py
# Public entry point for the geometry property widget package:
# re-export the extension implementation from the scripts subpackage.
from .scripts import *
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/geometry_properties.py
"""Geometry property-window extension: registers the Geometry/Visual/Kind
property widgets and a set of "Rendering" context-menu toggles that set
boolean/int primvars on selected prims."""
import os
import carb
import omni.ext
from functools import partial
from pathlib import Path
from pxr import Sdf, Usd, UsdGeom, UsdUI
from typing import Any, Callable
from omni.kit.property.usd.prim_selection_payload import PrimSelectionPayload

# Singleton instance, set in on_startup and cleared in on_shutdown.
_extension_instance = None
# Filled in during on_startup with this extension's data/tests directory.
TEST_DATA_PATH = ""


def get_instance():
    """Return the live GeometryPropertyExtension instance (or None)."""
    global _extension_instance
    return _extension_instance


class GeometryPropertyExtension(omni.ext.IExt):
    def __init__(self):
        self._registered = False           # whether widgets are currently registered
        self._button_menu_entry = []       # menu entries to remove on shutdown
        self._visual_property_widget = None
        super().__init__()

    def on_startup(self, ext_id):
        """Register the property widgets and all prim-path button menu entries."""
        global _extension_instance
        _extension_instance = self
        self._register_widget()
        manager = omni.kit.app.get_app().get_extension_manager()
        extension_path = manager.get_extension_path(ext_id)
        global TEST_DATA_PATH
        TEST_DATA_PATH = Path(extension_path).joinpath("data").joinpath("tests")
        # +add menu item(s)
        from omni.kit.property.usd import PrimPathWidget

        context_menu = omni.kit.context_menu.get_instance()
        if context_menu is None:
            carb.log_error("context_menu is disabled!")  # pragma: no cover
            return None  # pragma: no cover
        # Each entry below toggles one primvar on the selected Boundable prims;
        # name_fn renders "Set"/"Clear"/"Toggle" based on the current state.
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Instanceable",
                show_fn=context_menu.is_prim_selected,
                onclick_fn=self._click_toggle_instanceable,
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Wireframe Mode",
                name_fn=partial(self._get_primvar_state, prim_name="wireframe", text_name=" Wireframe Mode"),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="wireframe"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Do Not Cast Shadows",
                name_fn=partial(
                    self._get_primvar_state, prim_name="doNotCastShadows", text_name=" Do Not Cast Shadows"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="doNotCastShadows"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Enable Shadow Terminator Fix",
                name_fn=partial(
                    self._get_primvar_state, prim_name="enableShadowTerminatorFix", text_name=" Enable Shadow Terminator Fix"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="enableShadowTerminatorFix"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Enable Fast Refraction Shadow",
                name_fn=partial(
                    self._get_primvar_state, prim_name="enableFastRefractionShadow", text_name=" Enable Fast Refraction Shadow"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="enableFastRefractionShadow"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Disable RT SSS Transmission",
                name_fn=partial(
                    self._get_primvar_state, prim_name="disableRtSssTransmission", text_name=" Disable RT SSS Transmission"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="disableRtSssTransmission"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Multimatted ID:",
                name_fn=partial(
                    self._get_primvar_state, prim_name="multimatte_id", text_name=" ID for multimatte"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="multimatte_id"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Enable Holdout Object",
                name_fn=partial(
                    self._get_primvar_state, prim_name="holdoutObject", text_name=" Enable Holdout Object"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="holdoutObject"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Invisible To Secondary Rays",
                name_fn=partial(
                    self._get_primvar_state, prim_name="invisibleToSecondaryRays", text_name=" Invisible To Secondary Rays"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="invisibleToSecondaryRays"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Is Procedural Volume",
                name_fn=partial(
                    self._get_primvar_state, prim_name="isVolume", text_name=" Is Volume"
                ),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="isVolume"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Matte Object",
                name_fn=partial(self._get_primvar_state, prim_name="isMatteObject", text_name=" Matte Object"),
                show_fn=partial(context_menu.prim_is_type, type=UsdGeom.Boundable),
                onclick_fn=partial(self._click_set_primvar, prim_name="isMatteObject"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Hide From Camera",
                name_fn=partial(
                    self._get_primvar_state, prim_name="hideForCamera", text_name=" Hide From Camera"
                ),
                show_fn=[partial(context_menu.prim_is_type, type=UsdGeom.Boundable)],
                onclick_fn=partial(self._click_set_primvar, prim_name="hideForCamera"),
            )
        )
        self._button_menu_entry.append(
            PrimPathWidget.add_button_menu_entry(
                "Rendering/Toggle Is Light",
                name_fn=partial(self._get_primvar_state, prim_name="isLight", text_name=" Is Light"),
                show_fn=[partial(context_menu.prim_is_type, type=UsdGeom.Boundable)],
                onclick_fn=partial(self._click_set_primvar, prim_name="isLight"),
            )
        )

    def on_shutdown(self):  # pragma: no cover
        """Unregister widgets, remove menu entries, and clear the singleton."""
        if self._registered:
            self._unregister_widget()
        # release menu item(s)
        from omni.kit.property.usd import PrimPathWidget

        for item in self._button_menu_entry:
            PrimPathWidget.remove_button_menu_entry(item)
        global _extension_instance
        _extension_instance = None

    def register_custom_visual_attribute(self, attribute_name: str, display_name: str, type_name: str,
                                         default_value: Any, predicate: Callable[[Any], bool] = None):
        """
        Add custom attribute with placeholder.
        """
        if self._visual_property_widget:
            self._visual_property_widget.add_custom_attribute(
                attribute_name, display_name, type_name, default_value, predicate
            )

    def deregister_custom_visual_attribute(self, attribute_name: str):
        # Remove an attribute previously added via register_custom_visual_attribute.
        if self._visual_property_widget:
            self._visual_property_widget.remove_custom_attribute(attribute_name)

    def _register_widget(self):
        """Register the Geometry, Visual (imageable) and Kind widgets with the
        property window, if it is available."""
        import omni.kit.window.property as p
        from .prim_kind_widget import PrimKindWidget
        from .prim_geometry_widget import GeometrySchemaAttributesWidget, ImageableSchemaAttributesWidget

        w = p.get_window()
        if w:
            w.register_widget(
                "prim",
                "geometry",
                GeometrySchemaAttributesWidget(
                    "Geometry",
                    UsdGeom.Xformable,
                    [
                        UsdGeom.BasisCurves,
                        UsdGeom.Capsule,
                        UsdGeom.Cone,
                        UsdGeom.Cube,
                        UsdGeom.Cylinder,
                        UsdGeom.HermiteCurves,
                        UsdGeom.Mesh,
                        UsdGeom.NurbsCurves,
                        UsdGeom.NurbsPatch,
                        UsdGeom.PointInstancer,
                        UsdGeom.Points,
                        UsdGeom.Subset,
                        UsdGeom.Sphere,
                        UsdGeom.Xform,
                        UsdGeom.Gprim,
                        UsdGeom.PointBased,
                        UsdGeom.Boundable,
                        UsdGeom.Curves,
                        UsdGeom.Imageable,
                        UsdGeom.PointBased,
                        UsdGeom.Subset,
                        UsdGeom.ModelAPI,
                        UsdGeom.MotionAPI,
                        UsdGeom.PrimvarsAPI,
                        UsdGeom.XformCommonAPI,
                        UsdGeom.ModelAPI,
                        UsdUI.Backdrop,
                        UsdUI.NodeGraphNodeAPI,
                        UsdUI.SceneGraphPrimAPI,
                    ],
                    [
                        "proceduralMesh:parameterCheck",
                        "outputs:parameterCheck",
                        "refinementEnableOverride",
                        "refinementLevel",
                        "primvars:doNotCastShadows",
                        "primvars:enableShadowTerminatorFix",
                        "primvars:enableFastRefractionShadow",
                        "primvars:disableRtSssTransmission",
                        "primvars:holdoutObject",
                        "primvars:invisibleToSecondaryRays",
                        "primvars:isMatteObject",
                        "primvars:isVolume",
                        "primvars:multimatte_id",
                        "primvars:numSplits",
                        "primvars:endcaps",
                        UsdGeom.Tokens.proxyPrim,
                    ],
                    [
                        "primvars:displayColor",
                        "primvars:displayOpacity",
                        "doubleSided",
                        "purpose",
                        "visibility",
                        "xformOpOrder",
                    ],
                ),
            )
            self._visual_property_widget = ImageableSchemaAttributesWidget(
                "Visual", UsdGeom.Imageable, [], ["primvars:displayColor", "primvars:displayOpacity", "doubleSided", "singleSided"], []
            )
            w.register_widget(
                "prim",
                "geometry_imageable",
                self._visual_property_widget,
            )
            w.register_widget("prim", "kind", PrimKindWidget())
            self._registered = True

    def _unregister_widget(self):  # pragma: no cover
        # Reverse of _register_widget; safe to call only when registered.
        import omni.kit.window.property as p

        w = p.get_window()
        if w:
            w.unregister_widget("prim", "geometry")
            w.unregister_widget("prim", "geometry_imageable")
            w.unregister_widget("prim", "kind")
            self._registered = False

    def _click_set_primvar(self, payload: PrimSelectionPayload, prim_name: str):
        """Menu click handler: toggle `prim_name` primvar on all selected prims."""
        stage = payload.get_stage()
        if not stage:
            return
        omni.kit.commands.execute("TogglePrimVarCommand", prim_path=payload.get_paths(), prim_name=prim_name)

    def _get_primvar_state(self, objects: dict, prim_name: str, text_prefix: str = "", text_name: str = "") -> str:
        """Build the menu label: "Set"/"Clear"/"Toggle" depending on whether the
        primvar is unset/false, true, or mixed across the selection."""
        if not "stage" in objects or not "prim_list" in objects or not objects["stage"]:
            return None
        stage = objects["stage"]
        primvar_state = []
        for path in objects["prim_list"]:
            # prim_list entries may be Sdf.Path or already-resolved prims
            prim = stage.GetPrimAtPath(path) if isinstance(path, Sdf.Path) else path
            if prim:
                primvars_api = UsdGeom.PrimvarsAPI(prim)
                is_primvar = primvars_api.GetPrimvar(prim_name)
                if is_primvar:
                    primvar_state.append(is_primvar.Get())
                else:
                    primvar_state.append(False)
        if primvar_state == [False] * len(primvar_state):
            return f"{text_prefix}Set{text_name}"
        elif primvar_state == [True] * len(primvar_state):
            return f"{text_prefix}Clear{text_name}"
        return f"{text_prefix}Toggle{text_name}"

    def _click_toggle_instanceable(self, payload: PrimSelectionPayload):
        # Toggle the Usd "instanceable" flag on the selected prims.
        omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=payload.get_paths())
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/prim_geometry_widget.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.ui as ui
import omni.usd

from dataclasses import dataclass, field
from typing import Any, Callable, OrderedDict, List

from omni.kit.property.usd.usd_property_widget import MultiSchemaPropertiesWidget, UsdPropertyUiEntry
from omni.kit.property.usd.usd_property_widget import create_primspec_bool, create_primspec_int
from omni.kit.property.usd.custom_layout_helper import CustomLayoutFrame, CustomLayoutGroup, CustomLayoutProperty
from pxr import Kind, Sdf, Usd, UsdGeom


class GeometrySchemaAttributesWidget(MultiSchemaPropertiesWidget):
    """Property widget exposing geometry schema attributes (Gprim / BasisCurves / Points)
    plus a set of renderer-specific custom primvar placeholders."""

    def __init__(self, title: str, schema, schema_subclasses: list, include_list: list = [], exclude_list: list = []):
        """
        Constructor.

        Args:
            title (str): Title of the widgets on the Collapsable Frame.
            schema: The USD IsA schema or applied API schema to filter attributes.
            schema_subclasses (list): list of subclasses
            include_list (list): list of additional schema named to add
            exclude_list (list): list of additional schema named to remove
        """
        super().__init__(title, schema, schema_subclasses, include_list, exclude_list)

        # Custom attributes: placeholders shown for the prim types that support them.
        self.add_custom_schema_attribute("primvars:enableFastRefractionShadow", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:doNotCastShadows", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:enableShadowTerminatorFix", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(True))
        self.add_custom_schema_attribute("primvars:holdoutObject", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:invisibleToSecondaryRays", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:isMatteObject", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:isVolume", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:multimatte_id", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_int(-1))
        self.add_custom_schema_attribute("primvars:disableRtSssTransmission", lambda p: p.IsA(UsdGeom.Gprim), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:numSplitsOverride", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_bool(False))
        self.add_custom_schema_attribute("primvars:numSplits", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_int(2))
        self.add_custom_schema_attribute("primvars:endcaps", lambda p: p.IsA(UsdGeom.BasisCurves), None, "", create_primspec_int(1))
        self.add_custom_schema_attribute("refinementEnableOverride", self._is_prim_refinement_level_supported, None, "",
            create_primspec_bool(False))
        self.add_custom_schema_attribute("refinementLevel", self._is_prim_refinement_level_supported, None, "", create_primspec_int(0))

    def on_new_payload(self, payload):
        """
        See PropertyWidget.on_new_payload

        Returns False when any selected prim is not an instance of the widget schema;
        otherwise returns the list of properties to display (truthy when non-empty).
        """
        self._add_curves = False
        self._add_points = False

        if not super().on_new_payload(payload):
            return False

        if not self._payload or len(self._payload) == 0:
            return False

        used = []
        for prim_path in self._payload:
            prim = self._get_prim(prim_path)
            if not prim or not prim.IsA(self._schema):
                return False
            used += [attr for attr in prim.GetProperties() if attr.GetName() in self._schema_attr_names and not attr.IsHidden()]
            # Remember which optional sections the layout needs to build.
            if (prim.IsA(UsdGeom.BasisCurves)):
                self._add_curves = True
            if (prim.IsA(UsdGeom.Points)):
                self._add_points = True
            if self.is_custom_schema_attribute_used(prim):
                # A placeholder entry signals "something custom is in use".
                used.append(None)

        return used

    def _is_prim_refinement_level_supported(self, prim):
        # Refinement applies to meshes and the implicit gprim primitives.
        return (
            prim.IsA(UsdGeom.Mesh)
            or prim.IsA(UsdGeom.Cylinder)
            or prim.IsA(UsdGeom.Capsule)
            or prim.IsA(UsdGeom.Cone)
            or prim.IsA(UsdGeom.Sphere)
            or prim.IsA(UsdGeom.Cube)
        )

    def _is_prim_single_sided_supported(self, prim):
        return (
            prim.IsA(UsdGeom.Mesh)
            or prim.IsA(UsdGeom.Cylinder)
            or prim.IsA(UsdGeom.Capsule)
            or prim.IsA(UsdGeom.Cone)
            or prim.IsA(UsdGeom.Sphere)
            or prim.IsA(UsdGeom.Cube)
        )

    def _customize_props_layout(self, attrs):
        """Arrange the raw attribute list into the grouped custom layout."""
        self.add_custom_schema_attributes_to_props(attrs)
        frame = CustomLayoutFrame(hide_extra=False)
        with frame:

            def update_bounds(stage, prim_paths):
                # Recompute the "extent" attribute at the current timeline frame
                # whenever a size-like attribute changes.
                import omni.timeline  # local import: timeline extension is loaded at runtime

                timeline = omni.timeline.get_timeline_interface()
                current_time = timeline.get_current_time()
                current_time_code = Usd.TimeCode(
                    omni.usd.get_frame_time_code(current_time, stage.GetTimeCodesPerSecond())
                )
                for path in prim_paths:
                    prim = stage.GetPrimAtPath(path)
                    attr = prim.GetAttribute("extent") if prim else None
                    if prim and attr:
                        bounds = UsdGeom.Boundable.ComputeExtentFromPlugins(UsdGeom.Boundable(prim), current_time_code)
                        attr.Set(bounds)

            def build_extent_func(
                stage,
                attr_name,
                metadata,
                property_type,
                prim_paths: List[Sdf.Path],
                additional_label_kwargs={},
                additional_widget_kwargs={},
            ):
                # Read-only string display of the computed extent, with tooltip tracking.
                from omni.kit.window.property.templates import HORIZONTAL_SPACING
                from omni.kit.property.usd.usd_attribute_model import UsdAttributeModel
                from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
                from omni.kit.property.usd.widgets import ICON_PATH

                if not attr_name or not property_type:
                    return

                def value_changed_func(model, widget):
                    val = model.get_value_as_string()
                    widget.set_tooltip(val)

                with ui.HStack(spacing=HORIZONTAL_SPACING):
                    model = UsdAttributeModel(stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata)
                    UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
                    kwargs = {
                        "name": "models_readonly",
                        "model": model,
                        "enabled": False,
                        "tooltip": model.get_value_as_string(),
                    }
                    if additional_widget_kwargs:
                        kwargs.update(additional_widget_kwargs)
                    with ui.ZStack():
                        value_widget = ui.StringField(**kwargs)
                        mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
                    ui.Spacer(width=0)
                    with ui.VStack(width=8):
                        ui.Spacer()
                        ui.Image(
                            f"{ICON_PATH}/Default value.svg",
                            width=5.5,
                            height=5.5,
                        )
                        ui.Spacer()
                    model.add_value_changed_fn(lambda m, w=value_widget: value_changed_func(m, w))
                    return model

            def build_size_func(
                stage,
                attr_name,
                metadata,
                property_type,
                prim_paths: List[Sdf.Path],
                additional_label_kwargs={},
                additional_widget_kwargs={},
            ):
                # Float drag/slider that also keeps the extent attribute in sync.
                from omni.kit.window.property.templates import HORIZONTAL_SPACING
                from omni.kit.property.usd.usd_attribute_model import UsdAttributeModel
                from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
                from omni.kit.property.usd.widgets import ICON_PATH

                if not attr_name or not property_type:
                    return

                with ui.HStack(spacing=HORIZONTAL_SPACING):
                    model_kwargs = UsdPropertiesWidgetBuilder._get_attr_value_range_kwargs(metadata)
                    model = UsdAttributeModel(
                        stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata, **model_kwargs
                    )
                    UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
                    widget_kwargs = {"model": model}
                    widget_kwargs.update(UsdPropertiesWidgetBuilder._get_attr_value_soft_range_kwargs(metadata))
                    if additional_widget_kwargs:
                        widget_kwargs.update(additional_widget_kwargs)
                    with ui.ZStack():
                        value_widget = UsdPropertiesWidgetBuilder._create_drag_or_slider(ui.FloatDrag, ui.FloatSlider, **widget_kwargs)
                        mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
                    UsdPropertiesWidgetBuilder._create_control_state(value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs)
                    model.add_value_changed_fn(lambda m, s=stage, p=prim_paths: update_bounds(s, p))
                    return model

            def build_axis_func(
                stage,
                attr_name,
                metadata,
                property_type,
                prim_paths: List[Sdf.Path],
                additional_label_kwargs={},
                additional_widget_kwargs={},
            ):
                # Token combo box when the attribute declares allowedTokens,
                # plain string field otherwise; both trigger an extent refresh.
                from omni.kit.window.property.templates import HORIZONTAL_SPACING
                # Fixed: UsdAttributeModel is needed by the no-allowedTokens branch below;
                # it was previously referenced without being imported (NameError).
                from omni.kit.property.usd.usd_attribute_model import TfTokenAttributeModel, UsdAttributeModel
                from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
                from omni.kit.property.usd.widgets import ICON_PATH

                if not attr_name or not property_type:
                    return

                with ui.HStack(spacing=HORIZONTAL_SPACING):
                    model = None
                    UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
                    tokens = metadata.get("allowedTokens")
                    if tokens is not None and len(tokens) > 0:
                        model = TfTokenAttributeModel(
                            stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
                        )
                        widget_kwargs = {"name": "choices"}
                        if additional_widget_kwargs:
                            widget_kwargs.update(additional_widget_kwargs)
                        with ui.ZStack():
                            value_widget = ui.ComboBox(model, **widget_kwargs)
                            mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
                    else:
                        model = UsdAttributeModel(
                            stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
                        )
                        widget_kwargs = {"name": "models"}
                        if additional_widget_kwargs:
                            widget_kwargs.update(additional_widget_kwargs)
                        with ui.ZStack():
                            value_widget = ui.StringField(model, **widget_kwargs)
                            mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
                    UsdPropertiesWidgetBuilder._create_control_state(
                        model=model, value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs
                    )
                    model.add_item_changed_fn(lambda m, i, s=stage, p=prim_paths: update_bounds(s, p))
                    return model

            def build_endcaps_func(
                stage,
                attr_name,
                metadata,
                property_type,
                prim_paths: List[Sdf.Path],
                additional_label_kwargs={},
                additional_widget_kwargs={},
            ):
                # Endcap selector with a fixed token set (the primvar itself carries
                # no allowedTokens metadata, so supply them via a model subclass).
                from omni.kit.window.property.templates import HORIZONTAL_SPACING
                from omni.kit.property.usd.usd_attribute_model import TfTokenAttributeModel
                from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
                from omni.kit.property.usd.widgets import ICON_PATH

                if not attr_name or not property_type:
                    return

                with ui.HStack(spacing=HORIZONTAL_SPACING):
                    model = None
                    UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)

                    class MyTfTokenAttributeModel(TfTokenAttributeModel):
                        # Hard-coded choice list; the stored value is the index itself.
                        allowed_tokens = ["open", "flat", "round"]

                        def _get_allowed_tokens(self, attr):
                            return self.allowed_tokens

                        def _get_value_from_index(self, value):
                            return value

                        def _update_value(self, force=False):
                            was_updating_value = self._updating_value
                            self._updating_value = True
                            # NOTE: deliberately skips TfTokenAttributeModel's own override.
                            if super(TfTokenAttributeModel, self)._update_value(force):
                                # TODO don't have to do this every time. Just needed when
                                # "allowedTokens" actually changed
                                self._update_allowed_token()
                                index = self._value if self._value < len(self._allowed_tokens) else -1
                                if index != -1 and self._current_index.as_int != index:
                                    self._current_index.set_value(index)
                                    self._item_changed(None)
                            self._updating_value = was_updating_value

                    model = MyTfTokenAttributeModel(
                        stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata
                    )
                    widget_kwargs = {"name": "choices"}
                    if additional_widget_kwargs:
                        widget_kwargs.update(additional_widget_kwargs)
                    with ui.ZStack():
                        value_widget = ui.ComboBox(model, **widget_kwargs)
                        mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
                    UsdPropertiesWidgetBuilder._create_control_state(
                        model=model, value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs
                    )
                    return model

            if self._add_curves:
                with CustomLayoutGroup("Curve"):
                    CustomLayoutProperty("curveVertexCounts", "Per curve points")
                    CustomLayoutProperty("points", "Points")
                    CustomLayoutProperty("normals", "Normals")
                    CustomLayoutProperty("widths", "Widths")
                    CustomLayoutProperty("type", "Type")
                    CustomLayoutProperty("basis", "Basis")
                    CustomLayoutProperty("wrap", "Wrap")
                    CustomLayoutProperty("primvars:numSplitsOverride", "Number of BVH splits Override")
                    CustomLayoutProperty("primvars:numSplits", "Number of BVH splits")
                    CustomLayoutProperty("primvars:endcaps", "Endcaps", build_fn=build_endcaps_func)
            if self._add_points:
                with CustomLayoutGroup("Points"):
                    CustomLayoutProperty("points", "Points")
                    CustomLayoutProperty("normals", "Normals")
                    CustomLayoutProperty("widths", "Widths")

            # When curves/points sections exist the shared section is renamed "Common".
            commonSectionName = "Mesh"
            if self._add_curves or self._add_points:
                commonSectionName = "Common"

            with CustomLayoutGroup(commonSectionName):
                CustomLayoutProperty("normals", "Normals")
                CustomLayoutProperty("orientation", "Orientation")
                CustomLayoutProperty("points", "Points")
                CustomLayoutProperty("velocities", "Velocities")
                CustomLayoutProperty("accelerations", "Accelerations")
                CustomLayoutProperty("extent", "Extent", build_fn=build_extent_func)
                CustomLayoutProperty("size", "Size", build_fn=build_size_func)
                CustomLayoutProperty("radius", "Radius", build_fn=build_size_func)
                CustomLayoutProperty("axis", "Axis", build_fn=build_axis_func)
                CustomLayoutProperty("height", "Height", build_fn=build_size_func)
                CustomLayoutProperty("polymesh:parameterCheck", "Parameter Check")
                CustomLayoutProperty("primvars:doNotCastShadows", "Cast Shadows", build_fn=self._inverse_bool_builder)
                CustomLayoutProperty("primvars:enableShadowTerminatorFix", "Shadow Terminator Fix")
                CustomLayoutProperty("primvars:enableFastRefractionShadow", "Fast Refraction Shadow")
                CustomLayoutProperty("primvars:disableRtSssTransmission", "Enable Rt SSS Transmission", build_fn=self._inverse_bool_builder)
                CustomLayoutProperty("primvars:holdoutObject", "Holdout Object")
                CustomLayoutProperty("primvars:invisibleToSecondaryRays", "Invisible To Secondary Rays")
                CustomLayoutProperty("primvars:isMatteObject", "Matte Object")
                # Fixed typo: was "primvars:isVolme", which never matched the
                # "primvars:isVolume" attribute registered in __init__.
                CustomLayoutProperty("primvars:isVolume", "Is Volume")
                CustomLayoutProperty("primvars:multimatte_id", "Multimatte ID")
            with CustomLayoutGroup("Face"):
                CustomLayoutProperty("faceVertexIndices", "Indices")
                CustomLayoutProperty("faceVertexCounts", "Counts")
                CustomLayoutProperty("faceVaryingLinearInterpolation", "Linear Interpolation")
                CustomLayoutProperty("holeIndices", "Hole Indices")
            with CustomLayoutGroup("Refinement"):
                CustomLayoutProperty("refinementEnableOverride", "Refinement Override")
                CustomLayoutProperty("refinementLevel", "Refinement Level")
                CustomLayoutProperty("interpolateBoundary", "Interpolate Boundary")
                CustomLayoutProperty("subdivisionScheme", "Subdivision Scheme")
                CustomLayoutProperty("triangleSubdivisionRule", "Triangle SubdivisionRule")
            with CustomLayoutGroup("Corner"):
                CustomLayoutProperty("cornerIndices", "Indices")
                CustomLayoutProperty("cornerSharpnesses", "Sharpnesses")
            with CustomLayoutGroup("Crease"):
                CustomLayoutProperty("creaseIndices", "Indices")
                CustomLayoutProperty("creaseLengths", "Lengths")
                CustomLayoutProperty("creaseSharpnesses", "Sharpnesses")
        return frame.apply(attrs)

    def get_additional_kwargs(self, ui_prop: UsdPropertyUiEntry):
        """
        Override this function if you want to supply additional arguments when building
        the label or ui widget.
        """
        additional_widget_kwargs = None
        if ui_prop.prop_name == "refinementLevel":
            # Clamp the slider to the supported refinement range.
            additional_widget_kwargs = {"min": 0, "max": 5}
        return None, additional_widget_kwargs

    def _inverse_bool_builder(
        self,
        stage,
        attr_name,
        metadata,
        property_type,
        prim_paths: List[Sdf.Path],
        additional_label_kwargs={},
        additional_widget_kwargs={},
    ):
        """Checkbox builder that shows the logical inverse of a bool attribute
        (e.g. 'Cast Shadows' for primvars:doNotCastShadows)."""
        import carb.settings
        from omni.kit.window.property.templates import HORIZONTAL_SPACING
        from omni.kit.property.usd.usd_attribute_model import UsdAttributeInvertedModel
        from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder

        if not attr_name or not property_type:
            return
        with ui.HStack(spacing=HORIZONTAL_SPACING):
            model = UsdAttributeInvertedModel(stage, [path.AppendProperty(attr_name) for path in prim_paths], False, metadata)
            settings = carb.settings.get_settings()
            left_aligned = settings.get("ext/omni.kit.window.property/checkboxAlignment") == "left"
            if not left_aligned:
                if not additional_label_kwargs:
                    additional_label_kwargs = {}
                additional_label_kwargs["width"] = 0
            UsdPropertiesWidgetBuilder._create_label(attr_name, metadata, additional_label_kwargs)
            if not left_aligned:
                ui.Spacer(width=10)
                ui.Line(style={"color": 0x338A8777}, width=ui.Fraction(1))
                ui.Spacer(width=5)
            with ui.VStack(width=10):
                ui.Spacer()
                widget_kwargs = {"width": 10, "height": 0, "name": "greenCheck", "model": model}
                if additional_widget_kwargs:
                    widget_kwargs.update(additional_widget_kwargs)
                with ui.ZStack():
                    with ui.Placer(offset_x=0, offset_y=-2):
                        value_widget = ui.CheckBox(**widget_kwargs)
                    with ui.Placer(offset_x=1, offset_y=-1):
                        mixed_overlay = ui.Rectangle(
                            height=8, width=8, name="mixed_overlay", alignment=ui.Alignment.CENTER, visible=False
                        )
                ui.Spacer()
            if left_aligned:
                ui.Spacer(width=5)
                ui.Line(style={"color": 0x338A8777}, width=ui.Fraction(1))
            UsdPropertiesWidgetBuilder._create_control_state(value_widget=value_widget, mixed_overlay=mixed_overlay, **widget_kwargs)
        return model


@dataclass(frozen=True)
class CustomAttributeInfo:
    """Descriptor for a custom attribute placeholder shown by ImageableSchemaAttributesWidget."""

    schema_name: str       # attribute name as authored on the prim
    display_name: str      # label shown in the property window
    type_name: str         # Sdf type-name string, e.g. "bool"
    default_value: Any     # default used for the placeholder prim spec
    predicate: Callable[[Any], bool] = None  # optional per-prim support filter

    def is_supported(self, prim):
        # No predicate means the attribute applies to every prim.
        return self.predicate is None or self.predicate(prim)

    def get_metadata(self):
        return {Sdf.PrimSpec.TypeNameKey: self.type_name, "customData": {"default": self.default_value}}


class ImageableSchemaAttributesWidget(MultiSchemaPropertiesWidget):
    """Property widget for UsdGeom.Imageable attributes with pluggable custom placeholders."""

    def __init__(self, title: str, schema, schema_subclasses: list, include_list: list = [], exclude_list: list = []):
        """
        Constructor.

        Args:
            title (str): Title of the widgets on the Collapsable Frame.
            schema: The USD IsA schema or applied API schema to filter attributes.
            schema_subclasses (list): list of subclasses
            include_list (list): list of additional schema named to add
            exclude_list (list): list of additional schema named to remove
        """
        super().__init__(title, schema, schema_subclasses, include_list, exclude_list)
        self._custom_attributes: OrderedDict[str, CustomAttributeInfo] = OrderedDict()
        self._custom_placeholders: List[str] = []

        # custom attributes
        self.add_custom_schema_attribute("singleSided", self._is_prim_single_sided_supported, None, "", create_primspec_bool(False))

    def on_new_payload(self, payload):
        """ See PropertyWidget.on_new_payload """
        self._custom_placeholders.clear()
        if not super().on_new_payload(payload):
            return False
        if not self._payload or len(self._payload) == 0:
            return False
        used = []
        for prim_path in self._payload:
            prim = self._get_prim(prim_path)
            if not prim or not prim.IsA(self._schema):
                return False
            used += [attr for attr in prim.GetProperties() if attr.GetName() in self._schema_attr_names and not attr.IsHidden()]
            for schema_name, attr_info in self._custom_attributes.items():
                # Only show a placeholder for attributes not yet authored on the prim.
                if attr_info.is_supported(prim) and not prim.GetAttribute(schema_name):
                    self._custom_placeholders.append(schema_name)
                    used.append(None)
            if self.is_custom_schema_attribute_used(prim):
                used.append(None)
        return used

    def add_custom_attribute(self, attribute_name, display_name, type_name="bool", default_value=False, predicate: Callable[[Any], bool] = None):
        """
        Add custom attribute with placeholder.
        """
        self._schema_attr_base.add(attribute_name)
        self._custom_attributes.update(
            {attribute_name: CustomAttributeInfo(attribute_name, display_name, type_name, default_value, predicate)}
        )
        self.request_rebuild()

    def remove_custom_attribute(self, attribute_name):
        """Remove a previously registered custom attribute and rebuild the widget."""
        self._schema_attr_base.remove(attribute_name)
        del self._custom_attributes[attribute_name]
        self.request_rebuild()

    def _is_prim_single_sided_supported(self, prim):
        return (
            prim.IsA(UsdGeom.Mesh)
            or prim.IsA(UsdGeom.Cylinder)
            or prim.IsA(UsdGeom.Capsule)
            or prim.IsA(UsdGeom.Cone)
            or prim.IsA(UsdGeom.Sphere)
            or prim.IsA(UsdGeom.Cube)
        )

    def _customize_props_layout(self, attrs):
        self.add_custom_schema_attributes_to_props(attrs)
        # Inject UI entries for placeholders discovered in on_new_payload.
        for schema_name, attr_info in self._custom_attributes.items():
            if schema_name in self._custom_placeholders:
                attrs.append(
                    UsdPropertyUiEntry(
                        schema_name,
                        "",
                        attr_info.get_metadata(),
                        Usd.Attribute,
                    )
                )
        frame = CustomLayoutFrame(hide_extra=True)
        with frame:
            for schema_name, attr_info in self._custom_attributes.items():
                CustomLayoutProperty(schema_name, attr_info.display_name)
            # OMFP-1917: Most Visual settings under the Property tab don't work
            # Hiding doubleSided, singleSided, primvars:displayColor, primvars:displayOpacity
            CustomLayoutProperty("doubleSided", "Double Sided", hide_if_true=True)
            CustomLayoutProperty("singleSided", "Single Sided", hide_if_true=True)
            CustomLayoutProperty("purpose", "Purpose")
            CustomLayoutProperty("visibility", "Visibility")
            CustomLayoutProperty("primvars:displayColor", "Display Color")
            CustomLayoutProperty("primvars:displayOpacity", "Display Opacity")
        return frame.apply(attrs)
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/__init__.py
from .geometry_properties import * from .geometry_commands import *
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/geometry_commands.py
import carb
import omni.kit.commands
import omni.usd

from typing import List, Optional, Any
from pxr import Usd, Sdf, UsdGeom


class PrimVarCommand(omni.kit.commands.Command):
    """
    Set primvar undoable **Command**.

    Args:
        prim_path (list): List of paths of prims.
        prim_name (str): Primvar name.
        prim_type: Primvar variable type (e.g. Sdf.ValueTypeNames.Bool).
        value (any): New primvar value. If primvar doesn't exist, it will be created.
        usd_context_name (str): USD context to operate on ("" = default context).
    """

    def __init__(
        self,
        prim_path: List[str],
        prim_name: str,
        prim_type: str,
        value: Any,
        usd_context_name: Optional[str] = "",
    ):
        self._prim_path = prim_path
        self._prim_name = prim_name
        self._prim_type = prim_type
        self._value = value
        self._usd_context = omni.usd.get_context(usd_context_name)
        # Maps str(path) -> previous value, or None if the primvar did not exist.
        self._undo_values = {}

    def do(self):
        stage = self._usd_context.get_stage()
        for path in self._prim_path:
            if not path:
                continue
            primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
            primvar = primvars_api.GetPrimvar(self._prim_name)
            if primvar:
                if primvar.GetTypeName() != self._prim_type:
                    # Refuse to silently change the declared type of an existing primvar.
                    carb.log_error(f"PrimVarCommand: cannot set value as {path}.{self._prim_name} is type {primvar.GetTypeName()} and expected type is {self._prim_type}")
                else:
                    self._undo_values[str(path)] = primvar.Get()
                    primvar.Set(self._value)
            else:
                # Primvar doesn't exist yet; remember that so undo removes it again.
                self._undo_values[str(path)] = None
                primvars_api.CreatePrimvar(self._prim_name, self._prim_type).Set(self._value)

    def undo(self):
        stage = self._usd_context.get_stage()
        for path, orig_value in self._undo_values.items():
            primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
            primvar = primvars_api.GetPrimvar(self._prim_name)
            # Fixed: compare against None rather than truthiness, so falsy stored
            # values (False, 0, "") are restored instead of removing the primvar.
            if orig_value is not None:
                primvar.Set(orig_value)
            else:
                primvars_api.RemovePrimvar(self._prim_name)
        self._undo_values = {}


class TogglePrimVarCommand(omni.kit.commands.Command):
    """
    Toggle bool primvar undoable **Command**.

    Args:
        prim_path (list): List of paths of prims.
        prim_name (str): Primvar name.
        usd_context_name (str): USD context to operate on ("" = default context).
    """

    def __init__(
        self,
        prim_path: List[str],
        prim_name: str,
        usd_context_name: Optional[str] = "",
    ):
        self._prim_path = prim_path
        self._prim_name = prim_name
        self._usd_context = omni.usd.get_context(usd_context_name)
        # Maps str(path) -> previous bool value, or None if the primvar did not exist.
        self._undo_values = {}

    def do(self):
        stage = self._usd_context.get_stage()
        for path in self._prim_path:
            if not path:
                continue
            primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
            primvar = primvars_api.GetPrimvar(self._prim_name)
            if primvar:
                if primvar.GetTypeName() != Sdf.ValueTypeNames.Bool:
                    # Fixed: the old message referenced self._prim_type, which this class
                    # never defines, so the error path itself raised AttributeError.
                    carb.log_error(f"TogglePrimVarCommand: cannot toggle {path}.{self._prim_name} as {primvar.GetTypeName()} isn't a {Sdf.ValueTypeNames.Bool}")
                else:
                    self._undo_values[str(path)] = primvar.Get()
                    primvar.Set(not primvar.Get())
            else:
                # Fixed: key with str(path) consistently (was raw path here but
                # str(path) in the sibling branch and in PrimVarCommand).
                self._undo_values[str(path)] = None
                primvars_api.CreatePrimvar(self._prim_name, Sdf.ValueTypeNames.Bool).Set(True)

    def undo(self):
        stage = self._usd_context.get_stage()
        for path, orig_value in self._undo_values.items():
            primvars_api = UsdGeom.PrimvarsAPI(stage.GetPrimAtPath(path))
            primvar = primvars_api.GetPrimvar(self._prim_name)
            # Fixed: "if orig_value:" removed the primvar whenever the stored value
            # was False — i.e. undoing a False->True toggle deleted it instead of
            # restoring False.
            if orig_value is not None:
                primvar.Set(orig_value)
            else:
                primvars_api.RemovePrimvar(self._prim_name)
        self._undo_values = {}


class ToggleInstanceableCommand(omni.kit.commands.Command):
    """
    Toggle instanceable undoable **Command**.

    Args:
        prim_path (list): List of paths of prims.
        usd_context_name (str): USD context to operate on ("" = default context).
    """

    def __init__(
        self,
        prim_path: List[str],
        usd_context_name: Optional[str] = "",
    ):
        self._prim_path = prim_path
        self._usd_context = omni.usd.get_context(usd_context_name)
        # Maps str(path) -> instanceable flag before the toggle.
        self._undo_values = {}

    def do(self):
        stage = self._usd_context.get_stage()
        for path in self._prim_path:
            if not path:
                continue
            prim = stage.GetPrimAtPath(path)
            value = prim.IsInstanceable()
            self._undo_values[str(path)] = value
            prim.SetInstanceable(not value)

    def undo(self):
        stage = self._usd_context.get_stage()
        for path, value in self._undo_values.items():
            prim = stage.GetPrimAtPath(path)
            prim.SetInstanceable(value)
        self._undo_values = {}
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/scripts/prim_kind_widget.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import omni.ui as ui
import omni.usd

from omni.kit.window.property.templates import SimplePropertyWidget, LABEL_WIDTH, LABEL_HEIGHT, HORIZONTAL_SPACING
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder, UsdPropertiesWidget
from omni.kit.property.usd.usd_object_model import MetadataObjectModel
from pxr import Kind, Usd, UsdGeom


class Constant:
    """Write-once constants used by this widget."""

    def __setattr__(self, name, value):
        raise Exception(f"Can't change Constant.{name}")  # pragma: no cover

    FONT_SIZE = 14.0
    MIXED = "Mixed"
    MIXED_COLOR = 0xFFCC9E61


class PrimKindWidget(UsdPropertiesWidget):
    """Property widget showing (and editing) the USD 'kind' metadata of the selection."""

    def __init__(self):
        super().__init__(title="Kind", collapsed=False)
        # Combo-box model; created lazily in build_items, torn down in reset.
        self._metadata_model = None

    def on_new_payload(self, payload):
        """ See PropertyWidget.on_new_payload """
        if not super().on_new_payload(payload):  # pragma: no cover
            return False  # pragma: no cover
        if len(self._payload) == 0:
            return False
        # Only show the Kind row when every selected prim is imageable.
        for prim_path in self._payload:  # pragma: no cover
            prim = self._get_prim(prim_path)  # pragma: no cover
            if not prim or not prim.IsA(UsdGeom.Imageable):  # pragma: no cover
                return False
        return True

    def reset(self):
        super().reset()
        if self._metadata_model:
            self._metadata_model.clean()
            self._metadata_model = None

    def build_items(self):
        super().build_items()

        # get Kinds
        all_kinds = Kind.Registry.GetAllKinds()
        all_kinds.insert(0, "")
        # http://graphics.pixar.com/usd/docs/USD-Glossary.html#USDGlossary-Kind
        # "model" is considered an abstract type and should not be assigned as any prim's kind.
        all_kinds.remove(Kind.Tokens.model)

        kind = None
        stage = self._payload.get_stage()
        for path in self._payload:
            prim = stage.GetPrimAtPath(path)
            if prim:
                prim_kind = Usd.ModelAPI(prim).GetKind()
                if kind is None:
                    kind = prim_kind
                elif kind != prim_kind:
                    # Selection spans prims with differing kinds.
                    kind = "mixed"
                if prim_kind not in all_kinds:  # pragma: no cover
                    # Keep unknown kinds selectable so the combo box can display them.
                    all_kinds.append(prim_kind)  # pragma: no cover
                    carb.log_verbose(f"{path} has invalid Kind:{prim_kind}")  # pragma: no cover

        if kind is None:  # pragma: no cover
            return  # pragma: no cover

        if self._filter.matches("Kind"):
            self._any_item_visible = True
            highlight = self._filter.name
            with ui.HStack(spacing=HORIZONTAL_SPACING):
                UsdPropertiesWidgetBuilder._create_label("Kind", {}, {"highlight": highlight})
                with ui.ZStack():
                    self._metadata_model = MetadataObjectModel(
                        stage, [path for path in self._payload], False, {}, key="kind", default="", options=all_kinds
                    )
                    value_widget = ui.ComboBox(self._metadata_model, name="choices")
                    mixed_overlay = UsdPropertiesWidgetBuilder._create_mixed_text_overlay()
                UsdPropertiesWidgetBuilder._create_control_state(self._metadata_model, value_widget, mixed_overlay)

    def _get_shared_properties_from_selected_prims(self, anchor_prim):
        # This widget edits metadata, not properties.
        return None

    def _get_prim(self, prim_path):
        if prim_path:
            stage = self._payload.get_stage()
            if stage:
                return stage.GetPrimAtPath(prim_path)
        return None  # pragma: no cover
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_path_toggle.py
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## import omni.kit.test import os import omni.kit.app import omni.usd from omni.kit.test.async_unittest import AsyncTestCase from omni.kit import ui_test from pxr import Gf from omni.kit.test_suite.helpers import open_stage, get_test_data_path, select_prims, wait_stage_loading, arrange_windows class PropertyPathAddMenu(AsyncTestCase): # Before running each test async def setUp(self): await arrange_windows("Stage", 64) await open_stage(get_test_data_path(__name__, "geometry_test.usda")) # After running each test async def tearDown(self): await wait_stage_loading() async def test_property_path_rendering(self): await ui_test.find("Property").focus() usd_context = omni.usd.get_context() stage = usd_context.get_stage() # select cube await select_prims(["/World/Cube"]) await ui_test.human_delay() # verify not set prim = stage.GetPrimAtPath("/World/Cube") attr = prim.GetAttribute("primvars:wireframe") self.assertFalse(attr.IsValid()) # click "Add" add_widget = [w for w in ui_test.find_all("Property//Frame/**/Button[*].identifier==''") if w.widget.text.endswith("Add")][0] await add_widget.click() # select wireframe await ui_test.select_context_menu("Rendering/Set Wireframe Mode") # verify set self.assertTrue(attr.IsValid()) self.assertTrue(attr.Get()) # undo omni.kit.undo.undo() # verify not set self.assertFalse(attr.IsValid())
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/__init__.py
from .test_geometry import * from .test_commands import * from .test_path_toggle import *
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_commands.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import pathlib
import omni.kit.app
import omni.kit.commands
import omni.kit.test
import omni.ui as ui

from omni.ui.tests.test_base import OmniUiTest
from omni.kit.test_suite.helpers import open_stage, get_test_data_path
from omni.kit import ui_test
from pxr import Sdf


class TestCommandWidget(OmniUiTest):
    """Exercises PrimVarCommand / TogglePrimVarCommand / ToggleInstanceableCommand, including undo behavior."""

    # Before running each test
    async def setUp(self):
        await super().setUp()
        await open_stage(get_test_data_path(__name__, "geometry_test.usda"))

    # After running each test
    async def tearDown(self):
        await super().tearDown()

    async def test_command_prim_var(self):
        """PrimVarCommand creates/updates a primvar, rejects type mismatches, and undoes cleanly."""
        stage = omni.usd.get_context().get_stage()
        prim = stage.GetPrimAtPath("/World/Cube")
        attr = prim.GetAttribute('primvars:test_int')
        self.assertFalse(attr.IsValid())

        # create primvar as int
        omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Int, value=123456)
        attr = prim.GetAttribute('primvars:test_int')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), 123456)

        # try and change using bool
        # Type mismatch must be rejected: value stays at 123456.
        omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Bool, value=True)
        attr = prim.GetAttribute('primvars:test_int')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), 123456)

        # change primvar
        omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_int", prim_type=Sdf.ValueTypeNames.Int, value=654321)
        attr = prim.GetAttribute('primvars:test_int')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), 654321)

        # undo
        # Three undos to unwind all three executes above (the rejected one included).
        omni.kit.undo.undo()
        omni.kit.undo.undo()
        omni.kit.undo.undo()

        # verify undo removed primvar
        attr = prim.GetAttribute('primvars:test_int')
        self.assertFalse(attr.IsValid())

        # create primvar as bool
        omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Bool, value=True)
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), True)

        # try and change using int
        omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Int, value=123456)
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), True)

        # change primvar
        omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Bool, value=False)
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), False)

        # undo
        omni.kit.undo.undo()
        omni.kit.undo.undo()
        omni.kit.undo.undo()

        # verify undo removed primvar
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertFalse(attr.IsValid())

    async def test_command_toggle_prim_var(self):
        """TogglePrimVarCommand creates a bool primvar, toggles it, and undoes back to absent."""
        stage = omni.usd.get_context().get_stage()
        prim = stage.GetPrimAtPath("/World/Cube")
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertFalse(attr.IsValid())

        # create primvar as bool
        # First toggle on a missing primvar creates it with value True.
        omni.kit.commands.execute("TogglePrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool")
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), True)

        # try and change using int
        omni.kit.commands.execute("PrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool", prim_type=Sdf.ValueTypeNames.Int, value=123456)
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), True)

        # change primvar
        omni.kit.commands.execute("TogglePrimVarCommand", prim_path=["/World/Cube"], prim_name="test_bool")
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), False)

        # undo
        omni.kit.undo.undo()
        omni.kit.undo.undo()
        omni.kit.undo.undo()

        # verify undo removed primvar
        attr = prim.GetAttribute('primvars:test_bool')
        self.assertFalse(attr.IsValid())

    async def test_command_toggle_instanceable(self):
        """ToggleInstanceableCommand flips the instanceable flag and undo restores the original state."""
        stage = omni.usd.get_context().get_stage()
        prim = stage.GetPrimAtPath("/World/Cube")
        self.assertFalse(prim.IsInstanceable())

        # toggle instanceable
        omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=["/World/Cube"])
        self.assertTrue(prim.IsInstanceable())

        # toggle instanceable
        omni.kit.commands.execute("ToggleInstanceableCommand", prim_path=["/World/Cube"])
        self.assertFalse(prim.IsInstanceable())

        # undo
        omni.kit.undo.undo()
        omni.kit.undo.undo()
        omni.kit.undo.undo()

        # verify undo
        self.assertFalse(prim.IsInstanceable())
omniverse-code/kit/exts/omni.kit.property.geometry/omni/kit/property/geometry/tests/test_geometry.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## import omni.kit.app import omni.kit.commands import omni.kit.test import omni.ui as ui from omni.ui.tests.test_base import OmniUiTest from omni.kit import ui_test from pxr import Kind, Sdf, Gf, UsdGeom from omni.kit.property.geometry import geometry_properties import pathlib class TestGeometryWidget(OmniUiTest): # Before running each test async def setUp(self): await super().setUp() from omni.kit.property.geometry.scripts.geometry_properties import TEST_DATA_PATH self._golden_img_dir = TEST_DATA_PATH.absolute().joinpath("golden_img").absolute() self._usd_path = TEST_DATA_PATH.absolute() from omni.kit.property.usd.usd_attribute_widget import UsdPropertiesWidget import omni.kit.window.property as p self._w = p.get_window() # After running each test async def tearDown(self): await super().tearDown() async def test_geometry_ui(self): usd_context = omni.usd.get_context() await self.docked_test_window( window=self._w._window, width=450, height=700, restore_window = ui.Workspace.get_window("Layer") or ui.Workspace.get_window("Stage"), restore_position = ui.DockPosition.BOTTOM) test_file_path = self._usd_path.joinpath("geometry_test.usda").absolute() await usd_context.open_stage_async(str(test_file_path)) await omni.kit.app.get_app().next_update_async() # Select the prim. 
usd_context.get_selection().set_selected_prim_paths(["/World/Cube"], True) # Need to wait for an additional frames for omni.ui rebuild to take effect await ui_test.human_delay(10) await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_geometry_ui.png") async def test_geometry_mixed_ui(self): usd_context = omni.usd.get_context() await self.docked_test_window( window=self._w._window, width=450, height=700, restore_window = ui.Workspace.get_window("Layer") or ui.Workspace.get_window("Stage"), restore_position = ui.DockPosition.BOTTOM) test_file_path = self._usd_path.joinpath("geometry_test.usda").absolute() await usd_context.open_stage_async(str(test_file_path)) await omni.kit.app.get_app().next_update_async() # Select the prim. usd_context.get_selection().set_selected_prim_paths(["/World/Cube", "/World/Looks"], True) # Need to wait for an additional frames for omni.ui rebuild to take effect await ui_test.human_delay(10) await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_geometry_mixed_ui.png") async def test_custom_visual_attribute_ui(self): usd_context = omni.usd.get_context() await self.docked_test_window( window=self._w._window, width=450, height=800, restore_window = ui.Workspace.get_window("Layer") or ui.Workspace.get_window("Stage"), restore_position = ui.DockPosition.BOTTOM) test_file_path = self._usd_path.joinpath("geometry_test.usda").absolute() await usd_context.open_stage_async(str(test_file_path)) await omni.kit.app.get_app().next_update_async() inst = geometry_properties.get_instance() self.assertIsNotNone(inst) def is_cube(prim): return prim.IsA(UsdGeom.Cube) inst.register_custom_visual_attribute("foo", "Foo", "bool", False, is_cube) inst.register_custom_visual_attribute("bar", "Bar", "int", 1234, is_cube) # Select the prim. 
usd_context.get_selection().set_selected_prim_paths(["/World/Cube"], True) # Need to wait for an additional frames for omni.ui rebuild to take effect await ui_test.human_delay(10) await omni.kit.app.get_app().next_update_async() await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_geometry_ui_custom.png") # Clean up inst.deregister_custom_visual_attribute("foo") inst.deregister_custom_visual_attribute("bar") await ui_test.human_delay(10) await omni.kit.app.get_app().next_update_async()
omniverse-code/kit/exts/omni.kit.property.geometry/docs/CHANGELOG.md
# Changelog

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

## [1.2.2] - 2022-09-27
### Changes
- Changed primvars:numSplits* text

## [1.2.1] - 2022-05-13
### Changes
- Cleaned up ImageWithProvider vs Image usage

## [1.2.0] - 2021-05-31
### Added
- Added extent regeneration on size/radius/axis changes

## [1.1.0] - 2021-03-19
### Added
- Added soft range [0, 5] for refinementLevel.

## [1.0.7] - 2021-02-19
### Changes
- Added UI test

## [1.0.6] - 2020-12-09
### Changes
- Added extension icon
- Added readme
- Updated preview image

## [1.0.5] - 2020-11-20
### Changes
- Silenced unknown kind warning

## [1.0.4] - 2020-11-06
### Changes
- Update Kind to use metadata model

## [1.0.3] - 2020-10-27
### Changes
- Fixed spacing on kind widget

## [1.0.2] - 2020-10-22
### Changes
- Improved layout

## [1.0.1] - 2020-10-22
### Changes
- Moved schema into bundle

## [1.0.0] - 2020-10-05
### Changes
- Created
omniverse-code/kit/exts/omni.kit.property.geometry/docs/README.md
# omni.kit.property.geometry

## Introduction

Property window extensions are for viewing and editing Usd Prim Attributes

## This extension supports editing of these Usd Types:

- UsdGeom.BasisCurves
- UsdGeom.Capsule
- UsdGeom.Cone
- UsdGeom.Cube
- UsdGeom.Cylinder
- UsdGeom.HermiteCurves
- UsdGeom.Mesh
- UsdGeom.NurbsCurves
- UsdGeom.NurbsPatch
- UsdGeom.PointInstancer
- UsdGeom.Points
- UsdGeom.Subset
- UsdGeom.Sphere
- UsdGeom.Xform
- UsdGeom.Gprim
- UsdGeom.PointBased
- UsdGeom.Boundable
- UsdGeom.Curves
- UsdGeom.Imageable
- UsdUI.Backdrop

### and supports editing of these Usd APIs:

- UsdGeom.ModelAPI
- UsdGeom.MotionAPI
- UsdGeom.PrimvarsAPI
- UsdGeom.XformCommonAPI
- UsdUI.NodeGraphNodeAPI
- UsdUI.SceneGraphPrimAPI
omniverse-code/kit/exts/omni.kit.property.geometry/docs/index.rst
omni.kit.property.geometry ########################### Property Geometry Values .. toctree:: :maxdepth: 1 CHANGELOG
omniverse-code/kit/exts/omni.kit.property.geometry/data/tests/geometry_test.usda
#usda 1.0 ( defaultPrim = "World" endTimeCode = 100 metersPerUnit = 0.009999999776482582 startTimeCode = 0 timeCodesPerSecond = 24 upAxis = "Y" ) def Xform "World" { def DistantLight "defaultLight" ( prepend apiSchemas = ["ShapingAPI"] ) { float angle = 1 float intensity = 3000 float shaping:cone:angle = 180 float shaping:cone:softness float shaping:focus color3f shaping:focusTint asset shaping:ies:file double3 xformOp:rotateXYZ = (315, 0, 0) double3 xformOp:scale = (1, 1, 1) double3 xformOp:translate = (0, 0, 0) uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] } def Cube "Cube" ( kind = "component" ) { float3[] extent = [(-50, -50, -50), (50, 50, 50)] rel material:binding = </World/Looks/PreviewSurface> ( bindMaterialAs = "weakerThanDescendants" ) uniform token purpose = "render" double size = 100 double3 xformOp:rotateXYZ = (10, 20, 30) double3 xformOp:scale = (2, 3, 4) double3 xformOp:translate = (110, 45, 0) uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] } def Scope "Looks" { def Material "PreviewSurface" { token outputs:surface.connect = </World/Looks/PreviewSurface/Shader.outputs:surface> def Shader "Shader" { reorder properties = ["inputs:diffuseColor", "inputs:emissiveColor", "inputs:useSpecularWorkflow", "inputs:specularColor", "inputs:metallic", "inputs:roughness", "inputs:clearcoat", "inputs:clearcoatRoughness", "inputs:opacity", "inputs:opacityThreshold", "inputs:ior", "inputs:normal", "inputs:displacement", "inputs:occlusion", "outputs:surface", "outputs:displacement"] uniform token info:id = "UsdPreviewSurface" float inputs:clearcoat = 0 float inputs:clearcoatRoughness = 0.01 color3f inputs:diffuseColor = (0.18, 0.18, 0.18) float inputs:displacement = 0 color3f inputs:emissiveColor = (0, 0, 0) float inputs:ior = 1.5 float inputs:metallic = 0 normal3f inputs:normal = (0, 0, 1) float inputs:occlusion = 1 float inputs:opacity = 1 float inputs:opacityThreshold = 0 float 
inputs:roughness = 0.5 ( customData = { dictionary range = { double max = 1 double min = 0 } } ) color3f inputs:specularColor = (0, 0, 0) int inputs:useSpecularWorkflow = 0 ( customData = { dictionary range = { int max = 1 int min = 0 } } ) token outputs:displacement token outputs:surface } } } }
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/style.py
from pathlib import Path

# Directory that contains this module.
CURRENT_PATH = Path(__file__).parent

# The extension's icon folder: five levels up from this module, then data/icons.
ICON_PATH = CURRENT_PATH.parent.parent.parent.parent.parent / "data" / "icons"

# omni.ui style override: icon shown for the "Display" menu item.
UI_STYLE = {"Menu.Item.Icon::Display": {"image_url": f"{ICON_PATH}/viewport_visibility.svg"}}
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/extension.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ViewportDisplayMenuBarExtension", "get_instance"]

from typing import Union
from omni.kit.viewport.menubar.core import BaseCategoryItem
from .display_menu_container import DEFAULT_SECTION, DisplayMenuContainer
import omni.ext

# Module-level singleton, set while the extension is running.
_extension_instance = None


def get_instance():
    """Return the running extension instance, or None when not started."""
    return _extension_instance


class ViewportDisplayMenuBarExtension(omni.ext.IExt):
    """The Entry Point for the Display Settings in Viewport Menu Bar"""

    def on_startup(self, ext_id):
        """Create the Display menu and publish this object as the singleton."""
        global _extension_instance
        self._display_menu = DisplayMenuContainer()
        _extension_instance = self

    def on_shutdown(self):
        """Tear the menu down and clear the singleton."""
        global _extension_instance
        self._display_menu.destroy()
        self._display_menu = None
        _extension_instance = None

    def register_custom_setting(self, text: str, setting_path: str):
        """
        Register custom display setting.

        Args:
            text (str): Text shown in menu item.
            setting_path (str): Setting path for custom display setting (bool value).
        """
        menu = self._display_menu
        if not menu:
            return
        menu.register_custom_setting(text, setting_path)

    def deregister_custom_setting(self, text: str):
        """
        Deregister custom display setting.

        Args:
            text (str): Text shown in menu item.
        """
        menu = self._display_menu
        if not menu:
            return
        menu.deregister_custom_setting(text)

    def register_custom_category_item(self, category: str, item: BaseCategoryItem, section: str = DEFAULT_SECTION):
        """
        Register custom display setting in category.

        Args:
            category (str): Category to add menu item. Can be an existing category e.g. "Heads Up Display" or a new one.
            item (item: BaseCategoryItem): Item to append.
            section (str): Optional section to organise category, default no section.
        """
        menu = self._display_menu
        if not menu:
            return
        menu.register_custom_category_item(category, item, section)

    def deregister_custom_category_item(self, category: str, item: BaseCategoryItem):
        """
        Deregister custom display setting in category.

        Args:
            category (str): Category to remove menu item. Can be an existing category e.g. "Heads Up Display" or a new one.
            item (item: BaseCategoryItem): Item to remove.
        """
        menu = self._display_menu
        if not menu:
            return
        menu.deregister_custom_category_item(category, item)
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/__init__.py
from .extension import *
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/model.py
import omni.ui as ui


class DisplayLayerModel(ui.SimpleBoolModel):
    """Bool model backed directly by a display layer's ``visible`` flag.

    The checkbox state is always read from the wrapped layer object, so the
    model itself never caches a value.
    """

    def __init__(self, layer) -> None:
        # Layer object whose visibility this model mirrors.
        self._layer = layer
        super().__init__()

    def get_value_as_bool(self) -> bool:
        """Report the layer's current visibility."""
        return self._layer.visible

    def set_value(self, visible: bool):
        """Apply a new visibility and notify subscribers only on a real change."""
        if visible == self._layer.visible:
            return  # No-op: avoid spurious change notifications.
        self._layer.visible = visible
        self._value_changed()

    def begin_edit(self) -> None:
        # No transactional edit behavior is needed for this model.
        pass

    def end_edit(self) -> None:
        # Nothing to commit: set_value applies changes immediately.
        pass
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/display_menu_container.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["DisplayMenuContainer"]

from omni.kit.viewport.menubar.core import (
    IconMenuDelegate,
    SettingModel,
    ViewportMenuContainer,
    CategoryMenuContainer,
    SelectableMenuItem,
    SimpleCategoryModel,
    CategoryStateItem,
    BaseCategoryItem,
    CategoryCustomItem,
    CategoryCollectionItem
)
from .style import UI_STYLE

import carb
import carb.settings
import omni.ui as ui
import omni.kit.app
import omni.usd

from functools import partial
from typing import Dict, List, Optional, Tuple

SHOW_BY_TYPE_EXCLUDE_LIST = "/exts/omni.kit.viewport.menubar.display/showByType/exclude_list"

HEADS_UP_CATEGORY_NAME = "Heads Up Display"
SHOW_BY_TYPE_CATEGORY_NAME = "Show By Type"
SHOW_BY_PURPOSE_CATEGORY_NAME = "Show By Purpose"
DEFAULT_CATEGORIES = [HEADS_UP_CATEGORY_NAME, SHOW_BY_TYPE_CATEGORY_NAME, SHOW_BY_PURPOSE_CATEGORY_NAME]
DEFAULT_SECTION = "default"


def _make_viewport_setting(viewport_api_id: str, setting: str):
    """Build the per-viewport persistent visibility setting path for `setting`."""
    return f"/persistent/app/viewport/{viewport_api_id}/{setting}/visible"


class DisplayMenuContainer(ViewportMenuContainer):
    """The menu with the visibility settings"""

    def __init__(self):
        super().__init__(
            name="Display",
            delegate=IconMenuDelegate("Display"),
            visible_setting_path="/exts/omni.kit.viewport.menubar.display/visible",
            order_setting_path="/exts/omni.kit.viewport.menubar.display/order",
            style=UI_STYLE
        )
        self._root_menu: Optional[ui.Menu] = None
        # Category name -> model driving that category's sub-menu.
        self._category_models: Dict[str, SimpleCategoryModel] = {}
        # (text, setting_path) pairs added via register_custom_setting().
        # FIX: was annotated List[List[str, str]], which is an invalid typing
        # parametrization (typing.List takes one argument) and is evaluated at
        # runtime for attribute annotations; items are actually 2-tuples.
        self._custom_settings: List[Tuple[str, str]] = []
        # Category name -> externally registered items to append to its model.
        self._custom_category_items: Dict[str, List[BaseCategoryItem]] = {}
        # Section name -> ordered list of category names shown under it.
        self._section_categories: Dict[str, List[str]] = {}
        self._section_categories[DEFAULT_SECTION] = DEFAULT_CATEGORIES[:]  # Copy the default categories list

    def destroy(self):
        super().destroy()

    def register_custom_setting(self, text: str, setting_path: str):
        """Add a simple bool menu item bound to `setting_path`; rebuilds the menu."""
        self._custom_settings.append((text, setting_path))
        if self._root_menu:
            self._root_menu.invalidate()

    def deregister_custom_setting(self, text: str):
        """Remove all custom settings registered under `text`; rebuilds the menu."""
        found = [item for item in self._custom_settings if item[0] == text]
        if found:
            for item in found:
                self._custom_settings.remove(item)
            if self._root_menu:
                self._root_menu.invalidate()

    def register_custom_category_item(self, category: str, item: BaseCategoryItem, section: str):
        """Register `item` under `category` (creating the category and `section` as needed).

        A CategoryCollectionItem whose text equals `category` becomes the
        category's root ("top category") instead of a child item.
        """
        is_top_category = False
        if category not in DEFAULT_CATEGORIES and category not in self._category_models:
            if item.text == category and isinstance(item, CategoryCollectionItem):
                self._category_models[category] = SimpleCategoryModel(category, root=item)
                is_top_category = True
            else:
                self._category_models[category] = SimpleCategoryModel(category)
        if category not in self._custom_category_items:
            self._custom_category_items[category] = []
        if section not in self._section_categories:
            self._section_categories[section] = []
        if not is_top_category:
            self._custom_category_items[category].append(item)
        if category not in self._section_categories[section]:
            self._section_categories[section].append(category)
        if self._root_menu:
            self._root_menu.invalidate()

    def deregister_custom_category_item(self, category: str, item: BaseCategoryItem):
        """Remove `item` from `category`; drop the category (and empty sections) when unused."""
        if category in self._custom_category_items:
            if item in self._custom_category_items[category]:
                self._custom_category_items[category].remove(item)
            if category not in DEFAULT_CATEGORIES:
                # A top-category item removes the whole category; so does removing the last item.
                if (item.text == category and isinstance(item, CategoryCollectionItem)) or len(self._custom_category_items[category]) == 0:
                    del self._category_models[category]
                    # Now clean up section
                    sections = list(self._section_categories.keys())
                    for section in sections:
                        if category in self._section_categories[section]:
                            self._section_categories[section].remove(category)
                            if len(self._section_categories[section]) == 0:
                                del self._section_categories[section]
            if self._root_menu:
                self._root_menu.invalidate()

    def build_fn(self, viewport_context: dict):
        """Create the root Display menu; items are built lazily via _build_menu_items."""
        self._root_menu = ui.Menu(self.name, delegate=self._delegate, on_build_fn=partial(self._build_menu_items, viewport_context), style=self._style)

    def _build_menu_items(self, viewport_context: dict, *args, **kwargs):
        """Populate the menu: default categories, custom sections, then flat toggles."""
        viewport = viewport_context.get("viewport_api")
        viewport_api_id: str = str(viewport.id)
        settings = carb.settings.get_settings()

        show_by_type_items: list[BaseCategoryItem] = [
            CategoryStateItem("Cameras", setting_path=_make_viewport_setting(viewport_api_id, "scene/cameras")),
            CategoryStateItem("Lights", setting_path=_make_viewport_setting(viewport_api_id, "scene/lights")),
            CategoryStateItem("Skeletons", setting_path=_make_viewport_setting(viewport_api_id, "scene/skeletons")),
            CategoryStateItem("Audio", setting_path=_make_viewport_setting(viewport_api_id, "scene/audio")),
        ]
        # Apps can hide entries via the exclude-list setting.
        if (exclude_list := settings.get(SHOW_BY_TYPE_EXCLUDE_LIST)):
            show_by_type_items = [item for item in show_by_type_items if item.text not in exclude_list]

        # 105.1: Support alternate label of memory (i.e. "Host Memory", "Process Memory", "Memory")
        # Defaults to pre 105.1 label (Host Memory) when not specified
        mem_label = settings.get("/exts/omni.kit.viewport.window/hud/hostMemory/label")
        if mem_label is None:
            mem_label = "Host"

        default_category_models = {
            HEADS_UP_CATEGORY_NAME: SimpleCategoryModel(
                HEADS_UP_CATEGORY_NAME,
                [
                    CategoryStateItem("FPS", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderFPS")),
                    CategoryStateItem("Device Memory", setting_path=_make_viewport_setting(viewport_api_id, "hud/deviceMemory")),
                    CategoryStateItem(f"{mem_label} Memory", setting_path=_make_viewport_setting(viewport_api_id, "hud/hostMemory")),
                    CategoryStateItem("Resolution", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderResolution")),
                    CategoryStateItem("Progress", setting_path=_make_viewport_setting(viewport_api_id, "hud/renderProgress")),
                ]
            ),
            SHOW_BY_TYPE_CATEGORY_NAME: SimpleCategoryModel(
                SHOW_BY_TYPE_CATEGORY_NAME,
                show_by_type_items
            ),
            SHOW_BY_PURPOSE_CATEGORY_NAME: SimpleCategoryModel(
                SHOW_BY_PURPOSE_CATEGORY_NAME,
                [
                    CategoryStateItem("Guide", setting_path="/persistent/app/hydra/displayPurpose/guide"),
                    CategoryStateItem("Proxy", setting_path="/persistent/app/hydra/displayPurpose/proxy"),
                    CategoryStateItem("Render", setting_path="/persistent/app/hydra/displayPurpose/render"),
                ]
            )
        }
        self._category_models.update(default_category_models)

        # XXX: These add_item calls currently must occur to add the separator!
        self._category_models[SHOW_BY_TYPE_CATEGORY_NAME].add_item(CategoryCustomItem(
            "Meshes",
            lambda: SelectableMenuItem("Meshes", SettingModel(setting_path=_make_viewport_setting(viewport_api_id, "scene/meshes")))
        ))
        self._category_models[HEADS_UP_CATEGORY_NAME].add_item(CategoryCustomItem(
            "Camera Speed",
            lambda: SelectableMenuItem("Camera Speed", SettingModel(_make_viewport_setting(viewport_api_id, "hud/cameraSpeed")))
        ))

        identifier = "omni.kit.viewport.menubar.display"

        # Create default section categories first
        for name in self._section_categories[DEFAULT_SECTION]:
            model = self._category_models[name]
            if name in self._custom_category_items:
                for item in self._custom_category_items[name]:
                    model.add_item(item)
            # XXX: Workaround nested creation of these items not being able to trigger an action!
            trigger_fns = None
            if name == SHOW_BY_TYPE_CATEGORY_NAME:
                icon_click_id = f"{identifier}.{name}.{name}"  # Left-most check/mixed icon was toggled
                trigger_fns = {
                    "Cameras": partial(self.__trigger_action, "toggle_camera_visibility", viewport_api=viewport),
                    "Lights": partial(self.__trigger_action, "toggle_light_visibility", viewport_api=viewport),
                    "Skeletons": partial(self.__trigger_action, "toggle_skeleton_visibility", viewport_api=viewport),
                    "Audio": partial(self.__trigger_action, "toggle_audio_visibility", viewport_api=viewport),
                    "Meshes": partial(self.__trigger_action, "toggle_mesh_visibility", viewport_api=viewport),
                    icon_click_id: partial(self.__trigger_action, "toggle_show_by_type_visibility", viewport_api=viewport),
                }
            CategoryMenuContainer(model, identifier=f"{identifier}.{name}", trigger_fns=trigger_fns)

        # Now iterate named sections, with a separator for each.
        for section, categories in self._section_categories.items():
            # FIX: compare by equality, not identity; externally registered
            # section strings equal to "default" must also be skipped here.
            if section == DEFAULT_SECTION:
                continue
            ui.Separator(text=section)
            for name in categories:
                model = self._category_models[name]
                if name in self._custom_category_items:
                    for item in self._custom_category_items[name]:
                        model.add_item(item)
                CategoryMenuContainer(model, identifier=f"{identifier}.{name}")

        ui.Separator()

        # This currently is just easier tied to legacy global setting
        SelectableMenuItem("Selection Outline",
            SettingModel(_make_viewport_setting(viewport_api_id, "guide/selection")),
            triggered_fn=partial(self.__trigger_action, "toggle_selection_hilight_visibility", viewport_api=viewport),
            trigger_will_set_model=True
        )
        SelectableMenuItem("Axis",
            SettingModel(_make_viewport_setting(viewport_api_id, "guide/axis")),
            triggered_fn=partial(self.__trigger_action, "toggle_axis_visibility", viewport_api=viewport),
            trigger_will_set_model=True
        )
        SelectableMenuItem("Grid",
            SettingModel(_make_viewport_setting(viewport_api_id, "guide/grid")),
            triggered_fn=partial(self.__trigger_action, "toggle_grid_visibility", viewport_api=viewport),
            trigger_will_set_model=True
        )

        # Custom display settings
        if self._custom_settings:
            ui.Separator()
            for (text, setting_path) in self._custom_settings:
                SelectableMenuItem(text, SettingModel(setting_path))

    def __trigger_action(self, action: str, *args, **kwargs):
        """Execute a registered omni.kit.viewport.actions action by name."""
        import omni.kit.actions.core

        action_registry = omni.kit.actions.core.get_action_registry()
        if action_registry:
            exc_action = action_registry.get_action("omni.kit.viewport.actions", action)
            if exc_action:
                exc_action.execute(*args, **kwargs)
            else:
                carb.log_error(f"Could not find action to run: '{action}'")
        else:
            # FIX: message previously lacked the closing quote around the action name.
            carb.log_error(f"Could not get action_registry to run '{action}'")
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/tests/__init__.py
from .test_ui import *
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/omni/kit/viewport/menubar/display/tests/test_ui.py
import omni.kit.test
from re import I
from omni.ui.tests.test_base import OmniUiTest
import omni.kit.ui_test as ui_test
from omni.kit.ui_test import Vec2
import omni.usd
import omni.kit.app
import omni.appwindow  # FIX: used below; was only available transitively
import omni.kit.viewport.menubar.display  # FIX: used below; was only available transitively
from pathlib import Path
import carb.input
import asyncio
import omni.ui as ui
from omni.kit.viewport.menubar.core import CategoryCollectionItem, CategoryStateItem, CategoryCustomItem, ViewportMenuDelegate, SelectableMenuItem

CURRENT_PATH = Path(__file__).parent
TEST_DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.parent.joinpath("data").joinpath("tests")

TEST_WIDTH, TEST_HEIGHT = 600, 400

# Settings used to exercise register_custom_setting() round-trips.
TEST_SETTING_TRUE = "/exts/test/setting/true"
TEST_SETTING_FALSE = "/exts/test/setting/false"


class TestSettingMenuWindow(OmniUiTest):
    """Golden-image tests for the viewport Display menu.

    Each test opens the Display menu and hovers a fixed y offset to expand a
    particular sub-menu before comparing against a golden image.
    """

    async def setUp(self):
        self._golden_img_dir = TEST_DATA_PATH.absolute().joinpath("golden_img").absolute()
        await self.create_test_area(width=TEST_WIDTH, height=TEST_HEIGHT)
        await omni.kit.app.get_app().next_update_async()

    async def test_general(self):
        """Menu opened, nothing hovered."""
        await self._show_display_menu("menubar_display.png", None)

    async def test_heads_up(self):
        await self._show_display_menu("menubar_display_headsup.png", 86)

    async def test_show_by_type(self):
        await self._show_display_menu("menubar_display_show_type.png", 106)

    async def test_show_by_purpose(self):
        await self._show_display_menu("menubar_display_show_purpose.png", 126)

    async def test_show_custom_menu_item(self):
        """Register collection/custom items and custom settings, snapshot, then deregister."""
        inst = omni.kit.viewport.menubar.display.get_instance()
        # NOTE: "catetory" typo is kept as-is: the label is baked into the golden image.
        custom_collection_item = CategoryCollectionItem(
            "Custom catetory",
            [
                CategoryStateItem("Custom Item", ui.SimpleBoolModel(True)),
            ]
        )
        inst.register_custom_category_item("Show By Type", custom_collection_item)

        def _build_menu():
            with ui.Menu("Physics", delegate=ViewportMenuDelegate()):
                SelectableMenuItem("Joints", ui.SimpleBoolModel(True))
                with ui.Menu("Colliders", delegate=ViewportMenuDelegate()):
                    SelectableMenuItem("None", ui.SimpleBoolModel(True))
                    SelectableMenuItem("Selected", ui.SimpleBoolModel(False))
                    SelectableMenuItem("All", ui.SimpleBoolModel(False))
                ui.Separator()
                SelectableMenuItem("Normals", ui.SimpleBoolModel(False))

        physics_item = CategoryCustomItem("Physics", _build_menu)
        inst.register_custom_category_item("Show By Type", physics_item)

        settings = carb.settings.get_settings()
        settings.set(TEST_SETTING_FALSE, False)
        settings.set(TEST_SETTING_TRUE, True)
        inst.register_custom_setting("test new setting (True)", TEST_SETTING_TRUE)
        inst.register_custom_setting("test new setting (False)", TEST_SETTING_FALSE)

        await omni.kit.app.get_app().next_update_async()
        await self._show_display_menu("menubar_display_custom.png", 106)

        # Clean up so later tests see the default menu again.
        inst.deregister_custom_category_item("Show By Type", custom_collection_item)
        inst.deregister_custom_category_item("Show By Type", physics_item)
        inst.deregister_custom_setting("test new setting (True)")
        inst.deregister_custom_setting("test new setting (False)")
        await omni.kit.app.get_app().next_update_async()

    async def test_show_custom_category_and_section(self):
        """A custom category in a named section must appear and fire shown_changed_fn."""
        inst = omni.kit.viewport.menubar.display.get_instance()
        category = "Draw Overlay"
        section = "Selection Display"
        did_shown_changed_callback = False

        def on_shown(s):
            # FIX: was print("on_shown: {s}") — missing the f prefix, so the
            # placeholder was printed literally instead of the value.
            print(f"on_shown: {s}")
            nonlocal did_shown_changed_callback
            did_shown_changed_callback = True

        overlay_item = CategoryCollectionItem(
            category,
            [
                CategoryCustomItem("Points", lambda: SelectableMenuItem("Points", model=ui.SimpleBoolModel())),
                CategoryCustomItem("Normals", lambda: SelectableMenuItem("Normals", model=ui.SimpleBoolModel()))
            ],
            shown_changed_fn=on_shown
        )
        inst.register_custom_category_item(category, overlay_item, section)
        await omni.kit.app.get_app().next_update_async()
        await self._show_display_menu("menubar_display_custom_category_and_section.png", 166)
        self.assertTrue(did_shown_changed_callback)
        inst.deregister_custom_category_item(category, overlay_item)
        await omni.kit.app.get_app().next_update_async()

    async def _show_display_menu(self, golden_img_name: str, y: int = None) -> None:
        """Open the Display menu, optionally hover row at `y`, and finalize the golden test.

        The finally block closes the menu by clicking elsewhere so a failed
        comparison does not leave the menu open for the next test.
        """
        # Enable mouse input
        app_window = omni.appwindow.get_default_app_window()
        for device in [carb.input.DeviceType.MOUSE]:
            app_window.set_input_blocking_state(device, None)

        try:
            await ui_test.emulate_mouse_move(Vec2(20, 46), human_delay_speed=4)
            await ui_test.emulate_mouse_click()
            if y is not None:
                await ui_test.emulate_mouse_move(Vec2(20, y))
            await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name=golden_img_name)
        finally:
            for i in range(3):
                await omni.kit.app.get_app().next_update_async()
            await ui_test.emulate_mouse_move(Vec2(300, 26))
            await ui_test.emulate_mouse_click()
            for i in range(3):
                await omni.kit.app.get_app().next_update_async()
omniverse-code/kit/exts/omni.kit.viewport.menubar.display/docs/index.rst
omni.kit.viewport.menubar.display ################################# Display Setting in Viewport MenuBar .. toctree:: :maxdepth: 1 README CHANGELOG .. automodule:: omni.kit.viewport.menubar.display :platform: Windows-x86_64, Linux-x86_64 :members: :undoc-members: :show-inheritance: :imported-members:
omniverse-code/kit/exts/omni.kit.usdz_export/PACKAGE-LICENSES/omni.kit.usdz_export-LICENSE.md
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in and to this software, related documentation and any modifications thereto. Any use, reproduction, disclosure or distribution of this software and related documentation without an express license agreement from NVIDIA CORPORATION is strictly prohibited.
omniverse-code/kit/exts/omni.kit.usdz_export/config/extension.toml
[package] title = "USDZ Exporter" description = "Packages assets into a USDZ archive." authors = ["NVIDIA"] version = "1.0.1" changelog="docs/CHANGELOG.md" preview_image = "data/preview.png" readme = "docs/README.md" #icon = "data/icon.png" category = "Internal" feature = true [[python.module]] name = "omni.kit.usdz_export" [dependencies] "omni.kit.pip_archive" = {} "omni.ui" = {} "omni.usd" = {} "omni.usd.libs" = {} "omni.kit.tool.collect" = {} "omni.kit.window.file_exporter" = {} # Additional python module with tests, to make them discoverable by test system. [[python.module]] name = "omni.kit.usdz_export.tests" [[test]] args = [ "--/app/asyncRendering=false", "--/rtx/materialDb/syncLoads=true", "--/omni.kit.plugin/syncUsdLoads=true", "--/rtx/hydra/materialSyncLoads=true" ] dependencies = [ "omni.kit.material.library", ]
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/__init__.py
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from .extension_usdz import UsdzExportExtension from .layers_menu import export, usdz_export
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/extension_usdz.py
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .layers_menu import layers_available
from .layers_menu import LayersMenu

import omni.ext
import omni.kit.app


class UsdzExportExtension(omni.ext.IExt):
    """Keeps the Layers-window "Export USDZ" menu alive while omni.kit.widget.layers is loaded."""

    def on_startup(self, ext_id):
        # Setup a callback for the event
        ext_manager = omni.kit.app.get_app_interface().get_extension_manager()
        self.__extensions_subscription = ext_manager.get_change_event_stream().create_subscription_to_pop(
            self._on_event, name="omni.kit.usdz_export"
        )
        self.__layers_menu = None
        # Sync immediately in case the layers widget is already loaded.
        self._on_event(None)

    def _on_event(self, event):
        # Create/destroy the menu in the Layers window
        available = layers_available()
        if self.__layers_menu and not available:
            self.__layers_menu.destroy()
            self.__layers_menu = None
        elif not self.__layers_menu and available:
            self.__layers_menu = LayersMenu()

    def on_shutdown(self):
        # Dropping the subscription object unsubscribes the callback.
        self.__extensions_subscription = None
        if self.__layers_menu:
            self.__layers_menu.destroy()
            self.__layers_menu = None
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/layers_menu.py
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .utils import is_extension_loaded, copy, list_folder_async
from pxr import Sdf, Usd
from pathlib import Path
from zipfile import ZipFile
from functools import partial
from typing import Callable, List
from omni.kit.window.file_exporter import get_file_exporter
from omni.kit.widget.prompt import PromptManager
import carb
import omni.client  # FIX: break_url() is used below; this module never imported omni.client itself
import omni.kit.tool.collect as collect
import omni.usd
import asyncio
import tempfile
import os
import shutil
import omni.kit.app
import omni.kit.notification_manager as nm


def layers_available() -> bool:
    """Returns True if the extension "omni.kit.widget.layers" is loaded"""
    return is_extension_loaded("omni.kit.widget.layers")


async def usdz_export(identifier, export_path):
    """Export the layer `identifier` into a USDZ package at `export_path`.

    The layer is collected (with dependencies flattened) into a temporary
    directory, packed into a local USDZ archive, and the result is copied
    to `export_path` (which may be an omniverse:// URL).

    A modal "Exporting to USDZ..." prompt is shown for the duration; it is
    always hidden again in the `finally` block, even on failure.
    """
    try:
        target_out = export_path
        carb.log_info(f"Starting to export layer '{identifier}' to '{target_out}'")
        prompt = PromptManager.post_simple_prompt("Please Wait", "Exporting to USDZ...", ok_button_info=None, modal=True)
        # Waits for prompt to be shown
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()

        layer = Sdf.Layer.FindOrOpen(identifier)
        if not layer:
            message = f"Failed to export layer {identifier} as it does not exist."
            carb.log_error(message)
            nm.post_notification(message, status=nm.NotificationStatus.WARNING)
            return

        with tempfile.TemporaryDirectory() as tmp_path:
            tmp_path = Path(tmp_path)
            collect_path = tmp_path.joinpath("collected")
            split_ext = os.path.splitext(identifier)

            # Can't collect USDZ files because MDLs can't be resolved:
            # unzip the package locally and re-export its root layer instead.
            if split_ext[1] == '.usdz':
                input_usdz_temp_path = str(tmp_path.joinpath('temp_copy.usdz'))
                await copy(identifier, str(input_usdz_temp_path))
                with ZipFile(input_usdz_temp_path, 'r') as zip_ref:
                    zip_ref.extractall(str(tmp_path))
                tmp_file_path = str(tmp_path.joinpath("main.usdc"))
                layer.Export(tmp_file_path)
                entry_layer_to_collect = tmp_file_path
            elif not omni.usd.is_usd_writable_filetype(identifier) or identifier.startswith('anon'):
                # Non-writable or anonymous layers are first exported to a
                # plain usdc file so the collector can read them.
                tmp_file_path = str(tmp_path.joinpath("main.usdc"))
                layer.Export(tmp_file_path)
                entry_layer_to_collect = tmp_file_path
            else:
                entry_layer_to_collect = identifier

            collector = collect.Collector(entry_layer_to_collect, str(collect_path), flat_collection=True)
            await collector.collect(None, None)

            # must create USDZ locally because the UsdUtils package cannot handle omniverse:// URIs
            absolute_paths, relative_paths = await list_folder_async(str(collect_path))
            local_out_path = collect_path.joinpath("local_out.usdz")

            # Create usdz package manually without using USD API as it cannot handle UDIM textures.
            zip_writer = Usd.ZipFileWriter.CreateNew(str(local_out_path))
            with zip_writer:
                for absolute_path, relative_path in zip(absolute_paths, relative_paths):
                    url = omni.client.break_url(absolute_path)
                    absolute_path = url.path
                    # FIXME: omni.client will return windows path prefixed with '/'
                    if os.name == "nt" and absolute_path[0] == '/':
                        absolute_path = absolute_path[1:]
                    zip_writer.AddFile(absolute_path, relative_path)

            await copy(str(local_out_path), target_out)
            layer = None
            zip_writer = None
    finally:
        prompt.visible = False
        prompt = None
        carb.log_info(f"Finished exporting layer '{identifier}' to '{target_out}'")


def export(objects):
    """Export the target layer to USDZ"""

    def on_export(callback: Callable, flatten: bool, filename: str, dirname: str,
                  extension: str = '', selections: List[str] = []):
        # `callback` and `flatten` are bound by partial() below and unused here.
        nonlocal objects
        # FIX: use the filename chosen in the export dialog; the previous code
        # embedded a literal placeholder string instead of the parameter.
        path = f"{dirname}/{filename}{extension}"
        item = objects["item"]
        identifier = item().identifier
        asyncio.ensure_future(usdz_export(identifier, path))

    file_picker = get_file_exporter()
    file_picker.show_window(
        title="Export To USDZ",
        export_button_label="Export",
        export_handler=partial(on_export, None, False),
        file_extension_types=[(".usdz", "Zipped package")]
    )


class LayersMenu:
    """
    When this object is alive, Layers 2.0 has an additional action for
    exporting the layer to USDZ.
    """

    def __init__(self):
        # Imported lazily: this class is only constructed when
        # omni.kit.widget.layers is known to be loaded (see layers_available()).
        import omni.kit.widget.layers as layers

        self.__menu_subscription = layers.ContextMenu.add_menu(
            [
                {"name": ""},
                {
                    "name": "Export USDZ",
                    "glyph": "menu_rename.svg",
                    "show_fn": [
                        layers.ContextMenu.is_layer_item,
                        layers.ContextMenu.is_not_missing_layer,
                        layers.ContextMenu.is_layer_not_locked_by_other,
                        layers.ContextMenu.is_layer_and_parent_unmuted
                    ],
                    "onclick_fn": export,
                }
            ]
        )

    def destroy(self):
        """Remove the menu from Layers 2.0"""
        self.__menu_subscription = None
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/utils.py
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import omni.kit.app
import traceback
import carb
import omni.client
import omni.client.utils as clientutils


def is_extension_loaded(extansion_name: str) -> bool:
    """
    Returns True if the extension with the given name is loaded.
    """
    # NOTE(review): the parameter name is misspelled ("extansion") but is kept
    # unchanged for backward compatibility with any keyword-argument callers.

    def is_ext(id: str, extension_name: str) -> bool:
        # Extension ids look like "<name>-<version>"; compare the name part only.
        id_name = id.split("-")[0]
        return id_name == extension_name

    app = omni.kit.app.get_app_interface()
    ext_manager = app.get_extension_manager()
    extensions = ext_manager.get_extensions()
    loaded = next((ext for ext in extensions if is_ext(ext["id"], extansion_name) and ext["enabled"]), None)
    # FIX (idiom): `not not loaded` replaced with an explicit None check.
    return loaded is not None


async def copy(src_path: str, dest_path: str):
    """Copy `src_path` to `dest_path` via omni.client, overwriting the target.

    Returns True on success, False on any failure (errors are logged).
    """
    carb.log_info(f"Copying from {src_path} to {dest_path}...")
    try:
        result = await omni.client.copy_async(src_path, dest_path, omni.client.CopyBehavior.OVERWRITE)
        if result != omni.client.Result.OK:
            carb.log_error(f"Cannot copy from {src_path} to {dest_path}, error code: {result}.")
            return False
        else:
            return True
    except Exception as e:
        # Best-effort: log and report failure rather than propagating.
        traceback.print_exc()
        carb.log_error(str(e))
        return False


async def list_folder_async(folder_path):
    """Recursively list `folder_path` via omni.client.

    Returns (absolute_paths, relative_paths), where relative paths are
    relative to `folder_path` and use forward slashes. If `folder_path`
    is a single file, the lists contain just that file.
    """

    def compute_absolute_path(base_path, is_base_path_folder, path, is_path_folder):
        if is_base_path_folder and not base_path.endswith("/"):
            base_path += "/"
        if is_path_folder and not path.endswith("/"):
            path += "/"
        return clientutils.make_absolute_url_if_possible(base_path, path)

    def remove_prefix(text, prefix):
        if text.startswith(prefix):
            return text[len(prefix):]
        return text

    absolute_paths = []
    relative_paths = []
    result, entry = await omni.client.stat_async(folder_path)
    if result == omni.client.Result.OK and entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
        is_folder = True
    else:
        is_folder = False
    folder_path = clientutils.make_file_url_if_possible(folder_path)
    if not is_folder:
        absolute_paths = [folder_path]
        relative_paths = [os.path.basename(folder_path)]
    else:
        if not folder_path.endswith("/"):
            folder_path += "/"
        # Breadth-first traversal of the folder tree.
        folder_queue = [folder_path]
        while len(folder_queue) > 0:
            folder = folder_queue.pop(0)
            (result, entries) = await omni.client.list_async(folder)
            if result != omni.client.Result.OK:
                break
            folders = set((e.relative_path for e in entries if e.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN))
            for f in folders:
                folder_queue.append(compute_absolute_path(folder, True, f, False))
            files = set((e.relative_path for e in entries if not e.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN))
            for file in files:
                absolute_path = compute_absolute_path(folder, True, file, False)
                absolute_paths.append(absolute_path)
                relative_path = remove_prefix(absolute_path, folder_path[:-1])
                relative_path = relative_path.replace("\\", "/")
                if relative_path != "/" and relative_path.startswith("/"):
                    relative_path = relative_path[1:]
                if len(relative_path) > 0:
                    relative_paths.append(relative_path)
    return absolute_paths, relative_paths
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/tests/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from .usdz_export_test import TestUsdzExport
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/tests/usda_test.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from ..layer_watch import LayerWatch
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
from pxr import Usd
from pxr import UsdGeom
import omni.client
import omni.kit
import omni.usd
import os
import time
import unittest

# Nucleus server and credentials used by the (currently skipped) server test.
OMNI_SERVER = "omniverse://kit.nucleus.ov-ci.nvidia.com"
OMNI_USER = "omniverse"
OMNI_PASS = "omniverse"


class TestUsdaEdit(OmniUiTest):
    """Tests for LayerWatch: mirroring a stage layer to an editable USDA file."""

    def __set_omni_credentials(self):
        """Point OMNI_USER/OMNI_PASS at the test server, remembering old values."""
        # Save the environment to be able to restore it
        self.__OMNI_USER = os.environ.get("OMNI_USER", None)
        self.__OMNI_PASS = os.environ.get("OMNI_PASS", None)
        # Set the credentials
        os.environ["OMNI_USER"] = OMNI_USER
        os.environ["OMNI_PASS"] = OMNI_PASS

    def __restore_omni_credentials(self):
        """Restore OMNI_USER/OMNI_PASS saved by __set_omni_credentials."""
        if self.__OMNI_USER is not None:
            os.environ["OMNI_USER"] = self.__OMNI_USER
        else:
            os.environ.pop("OMNI_USER")
        if self.__OMNI_PASS is not None:
            os.environ["OMNI_PASS"] = self.__OMNI_PASS
        else:
            os.environ.pop("OMNI_PASS")

    async def test_open_file(self):
        """The watched USDA mirror is a valid stage and is removed on stop_watch."""
        # New stage with a sphere
        await omni.usd.get_context().new_stage_async()
        omni.kit.commands.execute("CreatePrim", prim_path="/Sphere", prim_type="Sphere", select_new_prim=False)
        stage = omni.usd.get_context().get_stage()

        # Create USDA
        usda_filename = LayerWatch().start_watch(stage.GetRootLayer().identifier)

        # Check it's a valid stage
        duplicate = Usd.Stage.Open(usda_filename)
        self.assertTrue(duplicate)

        # Check it has the sphere
        sphere = duplicate.GetPrimAtPath("/Sphere")
        self.assertTrue(sphere)

        UsdGeom.Cylinder.Define(duplicate, '/Cylinder')
        duplicate.Save()

        await omni.kit.app.get_app().next_update_async()

        # Check cylinder is created
        cylinder = duplicate.GetPrimAtPath("/Cylinder")
        self.assertTrue(cylinder)

        # Remove USDA
        LayerWatch().stop_watch(stage.GetRootLayer().identifier)

        # Check the file is removed
        self.assertFalse(Path(usda_filename).exists())

        await omni.kit.app.get_app().next_update_async()

    @unittest.skip("Works locally, but fails on TC for server connection in linux -> flaky")
    async def test_edit_file_on_nucleus(self):
        """Edits to the local USDA mirror propagate back to a stage on Nucleus."""
        self.__set_omni_credentials()

        # Create a new stage on server
        temp_usd_folder = f"{OMNI_SERVER}/Users/test_usda_edit_{str(time.time())}"
        temp_usd_file_path = f"{temp_usd_folder}/test_edit_file_on_nucleus.usd"

        # cleanup first
        await omni.client.delete_async(temp_usd_folder)

        # create the folder
        result = await omni.client.create_folder_async(temp_usd_folder)
        self.assertEqual(result, omni.client.Result.OK)

        stage = Usd.Stage.CreateNew(temp_usd_file_path)
        await omni.kit.app.get_app().next_update_async()
        UsdGeom.Xform.Define(stage, '/xform')
        UsdGeom.Sphere.Define(stage, '/xform/sphere')
        await omni.kit.app.get_app().next_update_async()
        stage.Save()

        # Start watching and edit the temp stage
        usda_filename = LayerWatch().start_watch(stage.GetRootLayer().identifier)
        temp_stage = Usd.Stage.Open(usda_filename)

        # Create another sphere
        UsdGeom.Sphere.Define(temp_stage, '/xform/sphere1')

        # Save the stage
        temp_stage.Save()

        # UsdStage saves the temporary file and renames it to usda, so we need
        # to touch it to let LayerWatch know it's changed.
        Path(usda_filename).touch()

        # Small delay because watchdog in LayerWatch doesn't call the callback
        # right away. So we need to give it some time.
        await LayerWatch().wait_import_async(stage.GetRootLayer().identifier)

        # Remove USDA
        LayerWatch().stop_watch(stage.GetRootLayer().identifier)

        stage.Reload()

        # Check the second sphere is there
        sphere = stage.GetPrimAtPath("/xform/sphere1")
        self.assertTrue(sphere)

        # Remove the temp folder
        result = await omni.client.delete_async(temp_usd_folder)
        self.assertEqual(result, omni.client.Result.OK)

        self.__restore_omni_credentials()
omniverse-code/kit/exts/omni.kit.usdz_export/omni/kit/usdz_export/tests/usdz_export_test.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
from pxr import Usd
from pxr import UsdGeom
import carb
import omni.client
import omni.kit
import omni.usd
import os
import time
import unittest

from omni.kit.usdz_export import usdz_export

OMNI_SERVER = "omniverse://ov-test"


class TestUsdzExport(OmniUiTest):
    """End-to-end test for the usdz_export coroutine."""

    def get_test_dir(self):
        """Resolve the per-run data directory from the carb token system."""
        tokens = carb.tokens.get_tokens_interface()
        resolved = tokens.resolve("${data}")
        return f"{resolved}"

    async def test_export_usdz_file(self):
        """Export the bundled test stage and sanity-check the output size."""
        # Expected size window for the produced archive (local vs TC builds).
        min_size = 2600000
        max_size = 2675966

        # The test scene lives in <ext root>/data/test_stage/scene.usd.
        data_root = Path(__file__).parents[4].joinpath("data")
        scene_path = str(data_root.joinpath("test_stage").joinpath("scene.usd"))

        out_path = Path(self.get_test_dir()).joinpath("out.usdz").resolve()
        await usdz_export(scene_path, str(out_path))

        self.assertTrue(os.path.isfile(str(out_path)), 'out.usdz does not exist')
        size = os.stat(out_path).st_size
        self.assertTrue(min_size <= size <= max_size, f'File size mismatch, expected {min_size} but got {size}')
omniverse-code/kit/exts/omni.kit.usdz_export/docs/CHANGELOG.md
# Changelog ## [1.0.1] - 2022-11-08 - Add "omni.kit.window.file_exporter" as dependency. ## [1.0.0] - 2022-08-18 - Initial extension.
omniverse-code/kit/exts/omni.kit.usdz_export/docs/README.md
# USDZ Exporter [omni.kit.usdz_export] Exports selected layer to a USDZ archive.
omniverse-code/kit/fabric/include/carb/flatcache/IToken.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/Interface.h>
#ifndef __CUDACC__
// InterfaceUtils.h provides carb::getCachedInterface and is not CUDA-compatible
#include <carb/InterfaceUtils.h>
#endif // __CUDACC__

// Set to empty macro when IToken::iToken static member is removed
#define FLATCACHE_ITOKEN_INIT                                                                                          \
    const carb::flatcache::IToken* carb::flatcache::iToken = nullptr;

namespace carb
{
namespace flatcache
{

// TokenC are integer keys that identify paths to C-ABI interfaces
struct TokenC
{
    uint64_t token;

    // Note that in the name comparisons below we mask off USD's lifetime bit.
    // For example, tokens created from the same string are considered equal even
    // if one was created with finite lifetime and the other infinite lifetime.
    constexpr bool operator<(const TokenC& other) const
    {
        return (token & ~1) < (other.token & ~1);
    }
    constexpr bool operator==(const TokenC& other) const
    {
        return (token & ~1) == (other.token & ~1);
    }
    constexpr bool operator!=(const TokenC& other) const
    {
        return (token & ~1) != (other.token & ~1);
    }
};
static_assert(std::is_standard_layout<TokenC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");

// We don't reference count the uninitialized (or empty) token, and we use
// this fact to avoid unnecessary dll calls to addRef()/removeRef(), for
// example during std::vector resize. To do this we need to check whether a
// token is uninitialized without the dll call getEmptyToken(), so we store
// its value here in a constant.
// We run automated test "IToken::getEmptyToken() dll call can be replaced with
// constant, kUninitializedToken" to ensure that this constant never
// changes.
static constexpr TokenC kUninitializedToken{ 0 };

// C-ABI interface to pxr::TfToken
struct IToken
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::IToken", 0, 1);

    TokenC (*getHandle)(const char* name);
    const char* (*getText)(TokenC handle);
    void (*addRef)(TokenC handle);
    void (*removeRef)(TokenC handle);
    TokenC (*getEmptyToken)();
    uint64_t (*size)(TokenC handle);
};

// C++ wrapper for IToken.
// Owns one reference to the underlying interned string: copies addRef,
// moves steal the handle, and the destructor releases it.
class Token
{
    static carb::flatcache::IToken& sIToken();

public:
    // DEPRECATED: keeping for binary compatibility
    // Will be removed in October 2021 - @TODO set FLATCACHE_ITOKEN_INIT to empty macro when removed!
    // Still safe to use if initialized in a given dll
    static const carb::flatcache::IToken* iToken;

    // Default construction makes an uninitialized token; no refcounting needed.
    Token() : mHandle(kUninitializedToken)
    {
    }

    Token(const char* string)
    {
        mHandle = sIToken().getHandle(string);
    }

    // Needs to be noexcept for std::vector::resize() to move instead of copy
    ~Token() noexcept
    {
#ifndef __CUDACC__
        if (mHandle != kUninitializedToken)
        {
            // Skip the release entirely if Carbonite is already torn down.
            if (!carb::isFrameworkValid())
            {
                return;
            }
            // IToken can be nullptr during exit process
            if (auto iToken = carb::getCachedInterface<carb::flatcache::IToken>())
            {
                iToken->removeRef(mHandle);
            }
        }
#endif // __CUDACC__
    }

    // Copy constructor
    Token(const Token& other) : mHandle(other.mHandle)
    {
        if (mHandle != kUninitializedToken)
        {
            sIToken().addRef(mHandle);
        }
    }

    // Copy construct from integer
    Token(TokenC token) : mHandle(token)
    {
        if (mHandle != kUninitializedToken)
        {
            sIToken().addRef(mHandle);
        }
    }

    // Move constructor
    // Needs to be noexcept for std::vector::resize() to move instead of copy
    Token(Token&& other) noexcept
    {
        // We are moving the src handle so don't need to change its refcount
        mHandle = other.mHandle;
        // Make source invalid
        other.mHandle = kUninitializedToken;
    }

    // Copy assignment
    Token& operator=(const Token& other)
    {
        if (this != &other)
        {
            // Release our current handle before taking the new one.
            if (mHandle != kUninitializedToken)
            {
                sIToken().removeRef(mHandle);
            }
            mHandle = other.mHandle;
            if (other.mHandle != kUninitializedToken)
            {
                sIToken().addRef(mHandle);
            }
        }
        return *this;
    }

    // Move assignment
    Token& operator=(Token&& other) noexcept
    {
        if (&other == this)
            return *this;

        // We are about to overwrite the dest handle, so decrease its refcount
        if (mHandle != kUninitializedToken)
        {
            sIToken().removeRef(mHandle);
        }

        // We are moving the src handle so don't need to change its refcount
        mHandle = other.mHandle;
        other.mHandle = kUninitializedToken;

        return *this;
    }

    const char* getText() const
    {
        return sIToken().getText(mHandle);
    }

    uint64_t size() const
    {
        return sIToken().size(mHandle);
    }

    std::string getString() const
    {
        return std::string(sIToken().getText(mHandle), sIToken().size(mHandle));
    }

    // Note that in the name comparisons below TokenC masks off USD's lifetime bit.
    // In other words, tokens created from the same string are considered equal even
    // if one was created with finite lifetime and the other infinite lifetime.
    constexpr bool operator<(const Token& other) const
    {
        return mHandle < other.mHandle;
    }
    constexpr bool operator!=(const Token& other) const
    {
        return mHandle != other.mHandle;
    }
    constexpr bool operator==(const Token& other) const
    {
        return mHandle == other.mHandle;
    }

    constexpr operator TokenC() const
    {
        return mHandle;
    }

private:
    TokenC mHandle;
};
static_assert(std::is_standard_layout<Token>::value, "Token must be standard layout as it is used in C-ABI interfaces");

#ifndef __CUDACC__
inline carb::flatcache::IToken& Token::sIToken()
{
    // Acquire carbonite interface on first use
    carb::flatcache::IToken* iToken = carb::getCachedInterface<carb::flatcache::IToken>();
    CARB_ASSERT(iToken);
    return *iToken;
}
#endif // __CUDACC__

inline uint64_t swapByteOrder(uint64_t val)
{
#if !CARB_COMPILER_MSC
    // Compilers other than MSVC tend to turn the following into a single instruction like bswap
    val = ((val & 0xFF00000000000000u) >> 56u) | ((val & 0x00FF000000000000u) >> 40u) |
          ((val & 0x0000FF0000000000u) >> 24u) | ((val & 0x000000FF00000000u) >> 8u) |
          ((val & 0x00000000FF000000u) << 8u) | ((val & 0x0000000000FF0000u) << 24u) |
          ((val & 0x000000000000FF00u) << 40u) | ((val & 0x00000000000000FFu) << 56u);
#else
    // MSVC does not currently optimize the above code, so we have to use an intrinsic to get bswap
    val = _byteswap_uint64(val);
#endif
    return val;
}

inline size_t hash(TokenC token)
{
    size_t tokenWithoutMortalityBit = token.token & ~1;
    // The following Hash function was chosen to match the one in pxr\base\tf\hash.h
    // This is based on Knuth's multiplicative hash for integers. The
    // constant is the closest prime to the binary expansion of the inverse
    // golden ratio. The best way to produce a hash table bucket index from
    // the result is to shift the result right, since the higher order bits
    // have the most entropy. But since we can't know the number of buckets
    // in a table that's using this, we just reverse the byte order instead,
    // to get the highest entropy bits into the low-order bytes.
    return swapByteOrder(tokenWithoutMortalityBit * 11400714819323198549ULL);
}

inline size_t hash(Token const& token)
{
    return hash(TokenC(token));
}
}
}

namespace std
{
template <>
struct hash<carb::flatcache::Token>
{
    std::size_t operator()(const carb::flatcache::Token& key) const
    {
        return carb::flatcache::hash(key);
    }
};
template <>
class hash<carb::flatcache::TokenC>
{
public:
    size_t operator()(const carb::flatcache::TokenC& key) const
    {
        return carb::flatcache::hash(key);
    }
};
}
omniverse-code/kit/fabric/include/carb/flatcache/Defines.h
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // Improved #define preprocessor directives that support compile-time checking for mispelled or missing // directives. Basically, the same as #define MY_FEATURE 0/1, but with a bit more compile-time safety, // and ease of use around mixing or combining boolean logic. // // Example usage: // #define MY_FEATURE_A IN_USE // #define MY_FEATURE_B NOT_IN_USE // #define MY_FEATURE_C USE_IF( USING( MY_FEATURE_A ) && USING( MY_FEATURE_B ) ) // ... // void doStuff() // { // #if USING( MY_FEATURE_C ) // doStuff_C(); // #else // #if USING( MY_FEATURE_C ) // doStuff_NotC(); // #endif // #if USING( MY_FEATURE_C ) // } #define IN_USE && #define NOT_IN_USE &&! #define USE_IF(X) &&((X)?1:0)&& #define USING(X) (1 X 1) #ifndef NDEBUG #define DEVELOPMENT_BUILD IN_USE #else // #ifndef NDEBUG #define DEVELOPMENT_BUILD NOT_IN_USE #endif // #ifndef NDEBUG #ifdef _WIN32 #define WINDOWS_BUILD IN_USE #define LINUX_BUILD NOT_IN_USE #elif defined(__linux__) // #ifdef _WIN32 #define WINDOWS_BUILD NOT_IN_USE #define LINUX_BUILD IN_USE #else // #elif defined(__linux__) // #ifdef _WIN32 #error "Unsupported platform" #endif #define ASSERTS USE_IF( USING( DEVELOPMENT_BUILD ) )
omniverse-code/kit/fabric/include/carb/flatcache/WrapperImpl.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // The purpose of this file is to implement the C++ classes StageInProgress, // StageAtTime, StageAtTimeInterval and StageHistoryWindow by calling the // carbonite C-ABI interfaces, IStageInProgress, IStageAtTime, // IStageAtTimeWindow and IStageHistoryWindow. // // #include "StageWithHistory.h" #include <carb/InterfaceUtils.h> #include <carb/logging/Log.h> #include <type_traits> #include <cstdint> namespace carb { namespace flatcache { // StageInProgress implementation starts here // RAII constructor inline StageInProgress::StageInProgress(StageWithHistory& stageWithHistory, size_t simFrameNumber) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); m_stageInProgress = iStageInProgress->create(stageWithHistory.m_usdStageId, simFrameNumber); m_usdStageId = stageWithHistory.m_usdStageId; m_createdFromId = false; } // Non-RAII constructor inline StageInProgress::StageInProgress(StageInProgressId stageInProgressId) { m_stageInProgress = stageInProgressId; m_createdFromId = true; // m_usdStageId is not valid when m_createdFromId==true } inline StageInProgress::~StageInProgress() { if (!m_createdFromId) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); iStageInProgress->destroy(m_usdStageId); } } inline size_t StageInProgress::getFrameNumber() { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); return iStageInProgress->getFrameNumber(m_stageInProgress); } inline ValidMirrors 
StageInProgress::getAttributeValidBits(const Path& path, const Token& attrName) const { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); return iStageInProgress->getAttributeValidBits(m_stageInProgress, path, attrName); } inline RationalTime StageInProgress::getFrameTime() { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); return iStageInProgress->getFrameTime(m_stageInProgress); } template <typename T> T* StageInProgress::getAttribute(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC ptrAndSize = iStageInProgress->getAttribute(m_stageInProgress, path, attrName); if (sizeof(T) == ptrAndSize.elementSize) { return reinterpret_cast<T*>(ptrAndSize.ptr); } else { CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), ptrAndSize.elementSize); return nullptr; } } template <typename T> const T* StageInProgress::getAttributeRd(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); ConstSpanC ptrAndSize = iStageInProgress->getAttributeRd(m_stageInProgress, path, attrName); if (sizeof(T) == ptrAndSize.elementSize) { return reinterpret_cast<const T*>(ptrAndSize.ptr); } else { CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), ptrAndSize.elementSize); return nullptr; } } template <typename T> T* StageInProgress::getAttributeWr(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC ptrAndSize = iStageInProgress->getAttributeWr(m_stageInProgress, path, attrName); if (sizeof(T) == ptrAndSize.elementSize) { return reinterpret_cast<T*>(ptrAndSize.ptr); } else { CARB_LOG_WARN_ONCE("Trying to access 
%zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), ptrAndSize.elementSize); return nullptr; } } template <typename T> T* StageInProgress::getAttributeGpu(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC ptrAndSize = iStageInProgress->getAttributeGpu(m_stageInProgress, path, attrName); if (sizeof(T) == ptrAndSize.elementSize) { return reinterpret_cast<T*>(ptrAndSize.ptr); } else { CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), ptrAndSize.elementSize); return nullptr; } } template <typename T> const T* StageInProgress::getAttributeRdGpu(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); ConstSpanC ptrAndSize = iStageInProgress->getAttributeRdGpu(m_stageInProgress, path, attrName); if (sizeof(T) == ptrAndSize.elementSize) { return reinterpret_cast<const T*>(ptrAndSize.ptr); } else { CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), ptrAndSize.elementSize); return nullptr; } } template <typename T> T* StageInProgress::getAttributeWrGpu(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC ptrAndSize = iStageInProgress->getAttributeWrGpu(m_stageInProgress, path, attrName); if (sizeof(T*) == ptrAndSize.elementSize) { return reinterpret_cast<T*>(ptrAndSize.ptr); } else { CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), ptrAndSize.elementSize); return nullptr; } } template <typename T> T& StageInProgress::getOrCreateAttributeWr(const Path& path, const Token& attrName, Type type) { auto iStageInProgress = 
carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC ptrAndSize = iStageInProgress->getOrCreateAttributeWr(m_stageInProgress, path, attrName, TypeC(type)); if (sizeof(T) != ptrAndSize.elementSize) { CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), ptrAndSize.elementSize); } return *reinterpret_cast<T*>(ptrAndSize.ptr); } template <typename T> gsl::span<T> StageInProgress::getArrayAttribute(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC arrayData = iStageInProgress->getArrayAttributeWr(m_stageInProgress, path, attrName); if (sizeof(T) != arrayData.elementSize) { CARB_LOG_WARN_ONCE( "Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize); return gsl::span<T>(); } gsl::span<T> retval(reinterpret_cast<T*>(arrayData.ptr), arrayData.elementCount); return retval; } template <typename T> gsl::span<const T> StageInProgress::getArrayAttributeRd(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); ConstSpanC arrayData = iStageInProgress->getArrayAttributeRd(m_stageInProgress, path, attrName); if (sizeof(T) != arrayData.elementSize) { CARB_LOG_WARN_ONCE( "Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize); return gsl::span<const T>(); } gsl::span<const T> retval(reinterpret_cast<const T*>(arrayData.ptr), arrayData.elementCount); return retval; } template <typename T> gsl::span<T> StageInProgress::getArrayAttributeWr(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC arrayData = 
iStageInProgress->getArrayAttributeWr(m_stageInProgress, path, attrName); if (sizeof(T) != arrayData.elementSize) { CARB_LOG_WARN_ONCE( "Trying to access array with elements of size %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T), path.getText(), attrName.getText(), arrayData.elementSize); return gsl::span<T>(); } gsl::span<T> retval(reinterpret_cast<T*>(arrayData.ptr), arrayData.elementCount); return retval; } inline size_t StageInProgress::getArrayAttributeSize(const Path& path, const Token& attrName) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); return iStageInProgress->getArrayAttributeSize(m_stageInProgress, path, attrName); } inline void StageInProgress::setArrayAttributeSize(const Path& path, const Token& attrName, size_t elemCount) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); iStageInProgress->setArrayAttributeSize(m_stageInProgress, path, attrName, elemCount); } template <typename T> inline gsl::span<T> StageInProgress::setArrayAttributeSizeAndGet(const PrimBucketList& primBucketList, size_t primBucketListIndex, size_t indexInBucket, const Token& attrName, size_t newElemCount) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); SpanC newArrayC = iStageInProgress->setArrayAttributeSizeAndGet( m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, indexInBucket, attrName, newElemCount); T* typedElementsPtr = reinterpret_cast<T*>(newArrayC.ptr); return { typedElementsPtr, newArrayC.elementCount }; } inline void StageInProgress::createPrim(const Path& path) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); iStageInProgress->createPrim(m_stageInProgress, path); } inline void StageInProgress::destroyPrim(const Path& path) { auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); iStageInProgress->destroyPrim(m_stageInProgress, path); 
}

// Creates a single attribute of the given type on the prim at 'path'.
inline void StageInProgress::createAttribute(const Path& path, const Token& attrName, Type type)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->createAttribute(m_stageInProgress, path, attrName, TypeC(type));
}

// Creates n attributes in one call; unpacks the name/type pairs into parallel C arrays.
template <int n>
inline void StageInProgress::createAttributes(const Path& path, std::array<AttrNameAndType, n> attributes)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    std::array<TokenC, n> names;
    std::array<TypeC, n> types;
    for (int c = 0; c < n; ++c)
    {
        names[c] = attributes[c].name;
        types[c] = TypeC(attributes[c].type);
    }
    iStageInProgress->createAttributes(m_stageInProgress, path, names.data(), types.data(), n);
}

// Destroys one attribute by name; the unnamed Type parameter is ignored (kept for signature compatibility).
inline void StageInProgress::destroyAttribute(const Path& path, const Token& attrName, Type)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->destroyAttribute2(m_stageInProgress, path, attrName);
}

// Destroys one attribute by name.
inline void StageInProgress::destroyAttribute(const Path& path, const Token& attrName)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->destroyAttribute2(m_stageInProgress, path, attrName);
}

// Destroys a compile-time-sized batch of attributes.
template <int n>
inline void StageInProgress::destroyAttributes(const Path& path, const std::array<Token, n>& attributes)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    std::array<TokenC, n> names;
    for (int c = 0; c < n; ++c)
    {
        names[c] = TokenC(attributes[c]);
    }
    iStageInProgress->destroyAttributes(m_stageInProgress, path, names.data(), n);
}

// Destroys a runtime-sized batch of attributes. (Forwarding call is on the next chunk line.)
inline void StageInProgress::destroyAttributes(const Path& path, const std::vector<Token>& attributes)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    const size_t n = attributes.size();
    std::vector<TokenC> names(n);
    for (size_t c = 0; c < n; ++c)
    {
        names[c] = TokenC(attributes[c]);
    }
    iStageInProgress->destroyAttributes(m_stageInProgress, path, names.data(), (uint32_t)n);
}

// Finds buckets of prims matching the all/any/none attribute filters; the returned list owns its id.
inline PrimBucketList StageInProgress::findPrims(const carb::flatcache::set<AttrNameAndType>& all,
                                                 const carb::flatcache::set<AttrNameAndType>& any,
                                                 const carb::flatcache::set<AttrNameAndType>& none)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    PrimBucketListId primBucketListId = iStageInProgress->findPrims(m_stageInProgress, all, any, none);
    return { primBucketListId };
}

// Enables change tracking for one attribute on behalf of the given listener.
inline void StageInProgress::attributeEnableChangeTracking(const Token& attrName, ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    iChangeTrackerConfig->attributeEnable(m_stageInProgress, attrName, listenerId);
}

// Enables prim-creation tracking for the given listener.
inline void StageInProgress::enablePrimCreateTracking(ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    iChangeTrackerConfig->enablePrimCreateTracking(m_stageInProgress, listenerId);
}

// Disables change tracking for one attribute for the given listener.
inline void StageInProgress::attributeDisableChangeTracking(const Token& attrName, ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    iChangeTrackerConfig->attributeDisable(m_stageInProgress, attrName, listenerId);
}

// Pauses change tracking for the given listener.
inline void StageInProgress::pauseChangeTracking(ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    iChangeTrackerConfig->pause(m_stageInProgress, listenerId);
}

// Resumes change tracking for the given listener.
inline void StageInProgress::resumeChangeTracking(ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    iChangeTrackerConfig->resume(m_stageInProgress, listenerId);
}

// Queries whether change tracking is paused for the listener. (Call completes on the next chunk line.)
inline bool StageInProgress::isChangeTrackingPaused(ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    return
        iChangeTrackerConfig->isChangeTrackingPaused(m_stageInProgress, listenerId);
}

// Returns whether the listener is attached to this stage.
inline bool StageInProgress::isListenerAttached(ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    return iChangeTrackerConfig->isListenerAttached(m_stageInProgress, listenerId);
}

// Detaches the listener from this stage.
inline void StageInProgress::detachListener(ListenerId listenerId)
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    iChangeTrackerConfig->detachListener(m_stageInProgress, listenerId);
}

// Returns the number of attached listeners.
inline size_t StageInProgress::getListenerCount()
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IChangeTrackerConfig>();
    return iChangeTrackerConfig->getListenerCount(m_stageInProgress);
}

// Returns the buckets changed for the given listener as a ChangedPrimBucketList.
inline ChangedPrimBucketList StageInProgress::getChanges(ListenerId listenerId)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    PrimBucketListId changeListId = iStageInProgress->getChanges(m_stageInProgress, listenerId);
    return ChangedPrimBucketList(changeListId);
}

// Pops the pending change list for the listener.
inline void StageInProgress::popChanges(ListenerId listenerId)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->popChanges(m_stageInProgress, listenerId);
}

// Returns a typed span over one bucket's values of an attribute.
// NOTE(review): no element-size validation here, unlike the size-checked per-prim accessors.
template <typename T>
gsl::span<T> StageInProgress::getAttributeArray(const PrimBucketList& primBucketList,
                                               size_t primBucketListIndex,
                                               const Token& attrName)
{
    SpanC array;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getAttributeArray(
        &array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
    gsl::span<T> retval(typedElementsPtr, array.elementCount);
    return retval;
}

// Read-only variant of getAttributeArray. (Body continues on the next chunk line.)
template <typename T>
gsl::span<const T> StageInProgress::getAttributeArrayRd(const PrimBucketList& primBucketList,
                                                        size_t primBucketListIndex,
                                                        const Token& attrName) const
{
    ConstSpanC array;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getAttributeArrayRd(
        &array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
    gsl::span<const T> retval(typedElementsPtr, array.elementCount);
    return retval;
}

// Writable bucket-level accessor for an attribute.
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayWr(const PrimBucketList& primBucketList,
                                                  size_t primBucketListIndex,
                                                  const Token& attrName)
{
    SpanC array;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getAttributeArrayWr(
        &array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
    gsl::span<T> retval(typedElementsPtr, array.elementCount);
    return retval;
}

// GPU variant; forwards to IStageInProgress::getAttributeArrayGpu.
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayGpu(const PrimBucketList& primBucketList,
                                                   size_t primBucketListIndex,
                                                   const Token& attrName)
{
    SpanC array;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getAttributeArrayGpu(
        &array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
    gsl::span<T> retval(typedElementsPtr, array.elementCount);
    return retval;
}

// Read-only GPU variant. (Return statement is on the next chunk line.)
template <typename T>
gsl::span<const T> StageInProgress::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
                                                           size_t primBucketListIndex,
                                                           const Token& attrName) const
{
    ConstSpanC array;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getAttributeArrayRdGpu(
        &array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
    gsl::span<const T> retval(typedElementsPtr, array.elementCount);
    return retval;
}

// Writable GPU variant; forwards to IStageInProgress::getAttributeArrayWrGpu.
template <typename T>
gsl::span<T> StageInProgress::getAttributeArrayWrGpu(const PrimBucketList& primBucketList,
                                                     size_t primBucketListIndex,
                                                     const Token& attrName)
{
    SpanC array;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getAttributeArrayWrGpu(
        &array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
    gsl::span<T> retval(typedElementsPtr, array.elementCount);
    return retval;
}

// Gets the bucket-level array for an attribute, creating the attribute with 'type' if absent.
template <typename T>
gsl::span<T> StageInProgress::getOrCreateAttributeArrayWr(const PrimBucketList& primBucketList,
                                                          size_t primBucketListIndex,
                                                          const Token& attrName,
                                                          Type type)
{
    SpanC array;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getOrCreateAttributeArrayWr(
        &array, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName, TypeC(type));
    T* typedElementsPtr = reinterpret_cast<T*>(array.ptr);
    gsl::span<T> retval(typedElementsPtr, array.elementCount);
    return retval;
}

// For an array-valued attribute: one typed span per prim in the bucket.
template <typename T>
std::vector<gsl::span<T>> StageInProgress::getArrayAttributeArray(const PrimBucketList& primBucketList,
                                                                  size_t primBucketListIndex,
                                                                  const Token& attrName) const
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    ArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizes(
        m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    size_t primCount = pointersAndSizes.elementCount;
    std::vector<gsl::span<T>> arrays(primCount);
    for (size_t i = 0; i != primCount; i++)
    {
        T* typedElementsPtr = reinterpret_cast<T*>(pointersAndSizes.arrayPtrs[i]);
        arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
    }
    return arrays;
}

// Read-only per-prim spans for an array-valued attribute. (Signature completes on the next chunk line.)
template <typename T>
std::vector<gsl::span<const T>> StageInProgress::getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
                                                                          size_t primBucketListIndex,
                                                                          const Token& attrName) const
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    ConstArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizesRd(
        m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    size_t primCount = pointersAndSizes.elementCount;
    std::vector<gsl::span<const T>> arrays(primCount);
    for (size_t i = 0; i != primCount; i++)
    {
        const T* typedElementsPtr = reinterpret_cast<const T*>(pointersAndSizes.arrayPtrs[i]);
        arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
    }
    return arrays;
}

// Writable per-prim spans for an array-valued attribute.
template <typename T>
std::vector<gsl::span<T>> StageInProgress::getArrayAttributeArrayWr(const PrimBucketList& primBucketList,
                                                                    size_t primBucketListIndex,
                                                                    const Token& attrName) const
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    ArrayPointersAndSizesC pointersAndSizes = iStageInProgress->getArrayAttributeArrayWithSizesWr(
        m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    size_t primCount = pointersAndSizes.elementCount;
    std::vector<gsl::span<T>> arrays(primCount);
    for (size_t i = 0; i != primCount; i++)
    {
        T* typedElementsPtr = reinterpret_cast<T*>(pointersAndSizes.arrayPtrs[i]);
        arrays[i] = { typedElementsPtr, pointersAndSizes.sizes[i] };
    }
    return arrays;
}

// Returns the paths of all prims in one bucket.
inline gsl::span<const Path> StageInProgress::getPathArray(const PrimBucketList& primBucketList,
                                                           size_t primBucketListIndex) const
{
    ConstPathCSpan arrayC;
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->getPathArray(&arrayC, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
    const Path* array = reinterpret_cast<const Path*>(arrayC.ptr);
    gsl::span<const Path> retval(array, arrayC.elementCount);
    return retval;
}

// Debug helper: prints bucket names. (Body completes on the next chunk line.)
inline void StageInProgress::printBucketNames() const
{
    auto iStageInProgress =
        carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->printBucketNames(m_stageInProgress);
}

// Forwards to IStageInProgress::logAttributeWriteForNotice for the given attribute write.
inline void StageInProgress::logAttributeWriteForNotice(const Path& path, const Token& attrName)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->logAttributeWriteForNotice(m_stageInProgress, path, attrName);
}

// Returns the ordered name/type set of all attributes in one bucket.
inline flatcache::set<AttrNameAndType> StageInProgress::getAttributeNamesAndTypes(const PrimBucketList& primBucketList,
                                                                                  size_t primBucketListIndex) const
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    size_t attrCount = iStageInProgress->getBucketAttributeCount(
        m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
    flatcache::set<AttrNameAndType> namesAndTypes;
    namesAndTypes.v.resize(attrCount);
    // getBucketAttributeNamesAndTypes is guaranteed to return an ordered vector, so we don't have to sort namesAndTypes
    iStageInProgress->getBucketAttributeNamesAndTypes(
        namesAndTypes.data(), attrCount, m_stageInProgress, primBucketList.m_primBucketListId, primBucketListIndex);
    return namesAndTypes;
}

// Connection API

// Creates a single named connection on the prim at 'path'.
inline void StageInProgress::createConnection(const Path& path, const Token& connectionName, const Connection& connection)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->createConnection(m_stageInProgress, path, connectionName, connection);
}

// Creates several connections at once; silently does nothing if the two spans disagree in length.
inline void StageInProgress::createConnections(const Path& path,
                                               const gsl::span<Token>& connectionNames,
                                               const gsl::span<Connection>& connections)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    if (connectionNames.size() != connections.size())
        return;
    const TokenC* namesC = reinterpret_cast<const TokenC*>(connectionNames.data());
    iStageInProgress->createConnections(m_stageInProgress, path, namesC, connections.data(), connectionNames.size());
}

// (Declaration completes on the next chunk line.)
inline void
// Destroys one named connection on the prim at 'path'.
StageInProgress::destroyConnection(const Path& path, const Token& connectionName)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->destroyConnection(m_stageInProgress, path, connectionName);
}

// Destroys a batch of connections by name.
inline void StageInProgress::destroyConnections(const Path& path, const gsl::span<Token>& connectionNames)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    const TokenC* namesC = reinterpret_cast<const TokenC*>(connectionNames.data());
    iStageInProgress->destroyConnections(m_stageInProgress, path, namesC, connectionNames.size());
}

// Returns the named connection (no read/write intent declared).
inline Connection* StageInProgress::getConnection(const Path& path, const Token& connectionName)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    return iStageInProgress->getConnection(m_stageInProgress, path, connectionName);
}

// Read-only access to the named connection.
inline const Connection* StageInProgress::getConnectionRd(const Path& path, const Token& connectionName)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    return iStageInProgress->getConnectionRd(m_stageInProgress, path, connectionName);
}

// Writable access to the named connection.
inline Connection* StageInProgress::getConnectionWr(const Path& path, const Token& connectionName)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    return iStageInProgress->getConnectionWr(m_stageInProgress, path, connectionName);
}

// Copies every attribute from srcPath's prim onto dstPath's prim.
inline void StageInProgress::copyAttributes(const Path& srcPath, const Path& dstPath)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    iStageInProgress->copyAllAttributes(m_stageInProgress, srcPath, dstPath);
}

// Copies the listed attributes, keeping the same names at the destination.
// (Cast completes on the next chunk line.)
inline void StageInProgress::copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    size_t n = srcAttrs.size();
    const TokenC* srcAttrsC = reinterpret_cast<const
        TokenC*>(srcAttrs.data());
    iStageInProgress->copySpecifiedAttributes(m_stageInProgress, srcPath, srcAttrsC, dstPath, srcAttrsC, n);
}

// Copies the listed attributes, renaming srcAttrs[i] to dstAttrs[i] at the destination.
// Silently does nothing if the two spans disagree in length.
inline void StageInProgress::copyAttributes(const Path& srcPath,
                                            const gsl::span<Token>& srcAttrs,
                                            const Path& dstPath,
                                            const gsl::span<Token>& dstAttrs)
{
    auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    if (srcAttrs.size() != dstAttrs.size())
    {
        return;
    }
    size_t n = srcAttrs.size();
    const TokenC* srcAttrsC = reinterpret_cast<const TokenC*>(srcAttrs.data());
    const TokenC* dstAttrsC = reinterpret_cast<const TokenC*>(dstAttrs.data());
    iStageInProgress->copySpecifiedAttributes(m_stageInProgress, srcPath, srcAttrsC, dstPath, dstAttrsC, n);
}

// Reports whether a prim exists at 'path'.
// NOTE(review): implemented as "has at least one attribute", so a prim with zero attributes
// would be reported as absent — confirm this is the intended semantics.
inline bool StageInProgress::primExists(const Path& path)
{
    auto iStageReaderWriter = carb::getCachedInterface<carb::flatcache::IStageInProgress>();
    bool retval = iStageReaderWriter->getAttributeCount(m_stageInProgress, path) != 0;
    return retval;
}

// PrimBucketList implementation starts here

// Lazily acquires and caches the IPrimBucketList carbonite interface.
inline carb::flatcache::IPrimBucketList* PrimBucketList::sIPrimBucketList()
{
    // Acquire carbonite interface on first use
    return carb::getCachedInterface<carb::flatcache::IPrimBucketList>();
}

// Number of buckets in the list.
inline size_t PrimBucketList::bucketCount() const
{
    return sIPrimBucketList()->getBucketCount(m_primBucketListId);
}

// Alias of bucketCount(), for container-like usage.
inline size_t PrimBucketList::size() const
{
    return sIPrimBucketList()->getBucketCount(m_primBucketListId);
}

// Debug print of the list contents.
inline void PrimBucketList::print() const
{
    return sIPrimBucketList()->print(m_primBucketListId);
}

// Releases the underlying bucket list.
inline PrimBucketList::~PrimBucketList()
{
    sIPrimBucketList()->destroy(m_primBucketListId);
}

// Per-bucket change details for one bucket index.
inline BucketChanges ChangedPrimBucketList::getChanges(size_t index)
{
    return BucketChanges(sIPrimBucketList()->getChanges(m_primBucketListId, index));
}

// Indices of prims added to one bucket.
inline AddedPrimIndices ChangedPrimBucketList::getAddedPrims(size_t index)
{
    return AddedPrimIndices(sIPrimBucketList()->getAddedPrims(m_primBucketListId, index));
}

// StageAtTimeInterval implementation starts here

inline
// Lazily acquires and caches the IStageAtTimeInterval carbonite interface.
carb::flatcache::IStageAtTimeInterval* StageAtTimeInterval::sIStageAtTimeInterval()
{
    return carb::getCachedInterface<carb::flatcache::IStageAtTimeInterval>();
}

// Opens a view over [beginTime, endTime] of a stage's history (end inclusive iff includeEndTime).
inline StageAtTimeInterval::StageAtTimeInterval(StageWithHistory& stageWithHistory,
                                                RationalTime beginTime,
                                                RationalTime endTime,
                                                bool includeEndTime)
{
    m_stageAtTimeInterval =
        sIStageAtTimeInterval()->create(stageWithHistory.m_stageWithHistory, beginTime, endTime, includeEndTime);
}

// Same as above, constructed from a raw StageWithHistoryId.
inline StageAtTimeInterval::StageAtTimeInterval(StageWithHistoryId stageWithHistoryId,
                                                RationalTime beginTime,
                                                RationalTime endTime,
                                                bool includeEndTime)
{
    m_stageAtTimeInterval = sIStageAtTimeInterval()->create(stageWithHistoryId, beginTime, endTime, includeEndTime);
}

// Returns the attribute's valid-mirror bits for this interval.
inline ValidMirrors StageAtTimeInterval::getAttributeValidBits(const PathC& path, const TokenC& attrName) const
{
    return sIStageAtTimeInterval()->getAttributeValidBits(m_stageAtTimeInterval, path, attrName);
}

// One pointer per timesample in the interval; returns an empty vector (with a one-time warning)
// when T's size does not match the stored attribute size.
template <typename T>
std::vector<const T*> StageAtTimeInterval::getAttributeRd(const Path& path, const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<const T*> retval(count);
    const void** retvalData = reinterpret_cast<const void**>(retval.data());
    size_t bytesPerAttr =
        sIStageAtTimeInterval()->getAttributeRd(retvalData, count, m_stageAtTimeInterval, path, attrName);
    if (sizeof(T) == bytesPerAttr)
    {
        return retval;
    }
    else
    {
        CARB_LOG_WARN_ONCE("Trying to access %zu bytes from %s.%s, but flatcache has only %zu bytes", sizeof(T),
                           path.getText(), attrName.getText(), bytesPerAttr);
        return std::vector<const T*>();
    }
}

// GPU variant: per-sample pointers, nullptr where the stored element size does not match T.
// (Argument list completes on the next chunk line.)
template <typename T>
std::vector<const T*> StageAtTimeInterval::getAttributeRdGpu(const Path& path, const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<const T*> retval(count);
    std::vector<ConstSpanC> arrays(count);
    sIStageAtTimeInterval()->getAttributeRdGpu(arrays.data(), count, m_stageAtTimeInterval,
                                               path, attrName);
    for (size_t i = 0; i != count; i++)
    {
        if (arrays[i].elementSize == sizeof(T))
        {
            retval[i] = reinterpret_cast<const T*>(arrays[i].ptr);
        }
        else
        {
            // Size mismatch for this sample: hand back nullptr rather than a mistyped pointer.
            retval[i] = nullptr;
        }
    }
    return retval;
}

// Element count of the array attribute at each timesample in the interval.
inline std::vector<size_t> StageAtTimeInterval::getArrayAttributeSize(const Path& path, const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<size_t> sizes(count);
    sIStageAtTimeInterval()->getArrayAttributeSize(sizes.data(), count, m_stageAtTimeInterval, path, attrName);
    return sizes;
}

// One read-only span per timesample; empty span where the stored element size does not match T.
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getArrayAttributeRd(const Path& path, const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<ConstSpanWithTypeC> arrays(count);
    std::vector<gsl::span<const T>> retval(count);
    sIStageAtTimeInterval()->getArrayAttributeWithSizeRd(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
    for (size_t i = 0; i != count; i++)
    {
        if (arrays[i].elementSize != sizeof(T))
        {
            // NOTE(review): relies on the implicit span<T> -> span<const T> conversion;
            // a gsl::span<const T>() literal would be clearer.
            retval[i] = gsl::span<T>();
            continue;
        }
        const T* ptr = reinterpret_cast<const T*>(arrays[i].ptr);
        retval[i] = gsl::span<const T>(ptr, arrays[i].elementCount);
    }
    return retval;
}

// Untyped byte view of the array attribute at each timesample, with element size and type tag.
// (Return statement is on the next chunk line.)
inline std::vector<ConstArrayAsBytes> StageAtTimeInterval::getArrayAttributeRawRd(const Path& path,
                                                                                  const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<ConstSpanWithTypeC> arrays(count);
    std::vector<ConstArrayAsBytes> retval(count);
    sIStageAtTimeInterval()->getArrayAttributeWithSizeRd(arrays.data(), count, m_stageAtTimeInterval, path, attrName);
    for (size_t i = 0; i != count; i++)
    {
        const gsl::byte* ptr = reinterpret_cast<const gsl::byte*>(arrays[i].ptr);
        retval[i].arrayBytes = gsl::span<const gsl::byte>(ptr, arrays[i].elementCount * arrays[i].elementSize);
        retval[i].bytesPerElement = arrays[i].elementSize;
        retval[i].elementType = Type(arrays[i].type);
    }
    return retval;
}

// Timestamps of the samples captured in this interval.
inline std::vector<RationalTime> StageAtTimeInterval::getTimestamps() const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<RationalTime> retval(count);
    sIStageAtTimeInterval()->getTimestamps(retval.data(), count, m_stageAtTimeInterval);
    return retval;
}

// Number of timesamples in the interval.
inline size_t StageAtTimeInterval::getTimeSampleCount() const
{
    return sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
}

// Finds buckets of prims matching the all/any/none attribute filters.
inline PrimBucketList StageAtTimeInterval::findPrims(const carb::flatcache::set<AttrNameAndType>& all,
                                                     const carb::flatcache::set<AttrNameAndType>& any,
                                                     const carb::flatcache::set<AttrNameAndType>& none)
{
    PrimBucketListId primBucketListId = sIStageAtTimeInterval()->findPrims(m_stageAtTimeInterval, all, any, none);
    return { primBucketListId };
}

// Bucket-level values of an attribute: one span per timesample.
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getAttributeArrayRd(const PrimBucketList& primBucketList,
                                                                         size_t primBucketListIndex,
                                                                         const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<ConstSpanC> outC(count);
    ConstSpanC* outCData = outC.data();
    sIStageAtTimeInterval()->getAttributeArrayRd(
        outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    std::vector<gsl::span<const T>> retval(count);
    size_t i = 0;
    for (ConstSpanC array : outC)
    {
        const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
        retval[i] = gsl::span<const T>(typedElementsPtr, array.elementCount);
        i++;
    }
    return retval;
}

// GPU variant of the bucket-level read. (Argument list completes on the next chunk line.)
template <typename T>
std::vector<gsl::span<const T>> StageAtTimeInterval::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
                                                                            size_t primBucketListIndex,
                                                                            const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<ConstSpanC> outC(count);
    ConstSpanC* outCData = outC.data();
    sIStageAtTimeInterval()->getAttributeArrayRdGpu(
        outCData, count,
        m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    std::vector<gsl::span<const T>> retval(count);
    size_t i = 0;
    for (ConstSpanC array : outC)
    {
        const T* typedElementsPtr = reinterpret_cast<const T*>(array.ptr);
        retval[i] = gsl::span<const T>(typedElementsPtr, array.elementCount);
        i++;
    }
    return retval;
}

// Array-valued attribute over a bucket: per timesample, one span per prim.
template <typename T>
std::vector<std::vector<gsl::span<const T>>> StageAtTimeInterval::getArrayAttributeArrayRd(
    const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<ConstArrayPointersAndSizesC> outC(count);
    ConstArrayPointersAndSizesC* outCData = outC.data();
    sIStageAtTimeInterval()->getArrayAttributeArrayWithSizesRd(
        outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    std::vector<std::vector<gsl::span<const T>>> retval(count);
    size_t i = 0;
    for (ConstArrayPointersAndSizesC pointersAndSizes : outC)
    {
        size_t primCount = pointersAndSizes.elementCount;
        retval[i].resize(primCount);
        for (size_t j = 0; j != primCount; j++)
        {
            const T* typedElementsPtr = reinterpret_cast<const T*>(pointersAndSizes.arrayPtrs[j]);
            retval[i][j] = { typedElementsPtr, pointersAndSizes.sizes[j] };
        }
        i++;
    }
    return retval;
}

// Untyped char view of a bucket's attribute values, one span per timesample.
// (Cast completes on the next chunk line.)
inline std::vector<gsl::span<const char>> StageAtTimeInterval::getAttributeArrayRawRd(
    const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<ConstSpanC> outC(count);
    ConstSpanC* outCData = outC.data();
    sIStageAtTimeInterval()->getAttributeArrayRd(
        outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, attrName);
    std::vector<gsl::span<const char>> retval(count);
    size_t i = 0;
    for (ConstSpanC array : outC)
    {
        const char* typedElementsPtr = reinterpret_cast<const
            char*>(array.ptr);
        retval[i] = gsl::span<const char>(typedElementsPtr, array.elementCount * array.elementSize);
        i++;
    }
    return retval;
}

// Prim paths of one bucket, one span per timesample.
inline std::vector<gsl::span<const Path>> StageAtTimeInterval::getPathArray(const PrimBucketList& primBucketList,
                                                                            size_t primBucketListIndex) const
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<ConstPathCSpan> outC(count);
    ConstPathCSpan* outCData = outC.data();
    sIStageAtTimeInterval()->getPathArray(
        outCData, count, m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex);
    std::vector<gsl::span<const Path>> retval(count);
    size_t i = 0;
    for (ConstPathCSpan arrayC : outC)
    {
        const Path* array = reinterpret_cast<const Path*>(arrayC.ptr);
        retval[i] = gsl::span<const Path>(array, arrayC.elementCount);
        i++;
    }
    return retval;
}

// The named connection at each timesample.
inline std::vector<const Connection*> StageAtTimeInterval::getConnectionRd(const Path& path, const Token& connectionName)
{
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<const Connection*> retval(count);
    const void** retvalData = reinterpret_cast<const void**>(retval.data());
    sIStageAtTimeInterval()->getConnectionRd(retvalData, count, m_stageAtTimeInterval, path, connectionName);
    return retval;
}

// Debug helper: prints bucket names for this interval.
inline void StageAtTimeInterval::printBucketNames() const
{
    sIStageAtTimeInterval()->printBucketNames(m_stageAtTimeInterval);
}

// Attribute count of one bucket at each timesample.
inline std::vector<size_t> StageAtTimeInterval::getAttributeCounts(const PrimBucketList& primBucketList,
                                                                   size_t primBucketListIndex) const
{
    std::vector<size_t> counts;
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    counts.resize(count);
    sIStageAtTimeInterval()->getAttributeCounts(
        m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, count, counts.data());
    return counts;
}

// Per-timesample attribute names and types of one bucket, as parallel nested vectors.
// (Signature completes on the next chunk line.)
inline std::pair<std::vector<std::vector<Token>>, std::vector<std::vector<Type>>> StageAtTimeInterval::getAttributeNamesAndTypes(
    const
    PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
    std::vector<std::vector<Token>> outNames;
    std::vector<std::vector<Type>> outTypes;
    size_t count = sIStageAtTimeInterval()->getTimesampleCount(m_stageAtTimeInterval);
    std::vector<size_t> outSizes;
    outSizes.resize(count);
    sIStageAtTimeInterval()->getAttributeCounts(
        m_stageAtTimeInterval, primBucketList.m_primBucketListId, primBucketListIndex, count, outSizes.data());
    outNames.resize(count);
    outTypes.resize(count);
    // Make array of pointers to inner arrays to allow us to call
    // getAttributeNamesAndTypes, which takes a C-style 2D array
    // not a std::vector<std::vector>.
    // Also set size of inner arrays
    std::vector<Token*> outNamesPtrs(count);
    std::vector<Type*> outTypesPtrs(count);
    for (size_t i = 0; i < count; ++i)
    {
        outNames[i].resize(outSizes[i]);
        outTypes[i].resize(outSizes[i]);
        outNamesPtrs[i] = outNames[i].data();
        outTypesPtrs[i] = outTypes[i].data();
    }
    sIStageAtTimeInterval()->getAttributeNamesAndTypes(m_stageAtTimeInterval, primBucketList.m_primBucketListId,
                                                       primBucketListIndex, count, outSizes.data(),
                                                       outNamesPtrs.data(), outTypesPtrs.data());
    return { outNames, outTypes };
}

// Releases the interval view.
inline StageAtTimeInterval::~StageAtTimeInterval()
{
    sIStageAtTimeInterval()->destroy(m_stageAtTimeInterval);
}

// Exports this interval's contents to the given USD stage.
inline void StageAtTimeInterval::exportUsd(UsdStageId usdStageId) const
{
    auto iStageAtTimeInterval = carb::getCachedInterface<carb::flatcache::IStageAtTimeInterval>();
    iStageAtTimeInterval->exportUsd(m_stageAtTimeInterval, usdStageId);
}

/**
 * @brief Linear interpolation for carb types Double3, Float3, Float4 (color)
 * See InterpolationUsd.h for extended type support
 *
 * @details This is intended to be used internally by StageAtTime read methods in order
 * to calculate values that were not written by StageInProgress directly.
 *
 * Enables the decoupling of the sim and render threads by allowing them access
 * to ringbuffer values at various frequencies.
 */
// Generic lerp: (1 - theta) * a + theta * b.
template <typename T>
const T interpolate(const T& a, const T& b, float theta)
{
    T result = T(a * (1.0f - theta)) + T(b * theta);
    return result;
    // T result = std::lerp(a, b, theta);
}

// Component-wise lerp for carb::Double3; warns once if theta falls outside [0, 1].
template <>
inline const carb::Double3 interpolate(const carb::Double3& a, const carb::Double3& b, float theta)
{
    if (theta < 0.0 || theta > 1.0)
    {
        CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
    }
    carb::Double3 result;
    double tmp = 1.0 - theta;
    result.x = (a.x * tmp) + (b.x * theta);
    result.y = (a.y * tmp) + (b.y * theta);
    result.z = (a.z * tmp) + (b.z * theta);
    return result;
}

// Component-wise lerp for carb::Float3.
template <>
inline const carb::Float3 interpolate(const carb::Float3& a, const carb::Float3& b, float theta)
{
    if (theta < 0.0f || theta > 1.0f)
    {
        CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
    }
    carb::Float3 result;
    float tmp = 1.0f - theta;
    result.x = (a.x * tmp) + (b.x * theta);
    result.y = (a.y * tmp) + (b.y * theta);
    result.z = (a.z * tmp) + (b.z * theta);
    return result;
}

// Component-wise lerp for carb::Float4 (color).
template <>
inline const carb::Float4 interpolate(const carb::Float4& a, const carb::Float4& b, float theta)
{
    if (theta < 0.0f || theta > 1.0f)
    {
        CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
    }
    carb::Float4 result;
    float tmp = 1.0f - theta;
    result.x = (a.x * tmp) + (b.x * theta);
    result.y = (a.y * tmp) + (b.y * theta);
    result.z = (a.z * tmp) + (b.z * theta);
    result.w = (a.w * tmp) + (b.w * theta);
    return result;
}

// Tokens cannot be blended: nearest-neighbor selection. (Result completes on the next chunk line.)
template <>
inline const carb::flatcache::Token interpolate(const carb::flatcache::Token& a, const carb::flatcache::Token& b, float theta)
{
    if (theta < 0.0f || theta > 1.0f)
    {
        CARB_LOG_WARN_ONCE("WrapperImpl interpolate(): theta %f outside range [0.0, 1.0]", theta);
    }
    return theta < 0.5f ?
        a : b;
}

// Auxiliary function used when handling data that is not going to be interpolated (bool, string, int, uint)
// Returns pair of values from first and second sampled frame, or the value found and nullptr if data is only available
// in one frame
template <typename T>
inline optional<std::pair<optional<T>, optional<T>>> StageAtTime::getNonInterpolatableAttributeRd(
    const Path& path, const Token& attrName) const
{
    auto rawSamples = m_historyWindow.getAttributeRd<T>(path, attrName);
    std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
    // Sample/timestamp count mismatch: treat as "no data".
    if (rawSamples.size() != sampleTimes.size())
    {
        return carb::cpp17::nullopt;
    }
    // checking that if the rawSamples are not empty, we have something valid in rawSamples[0]
    CARB_ASSERT(rawSamples.empty() || rawSamples[0]);
    // Communicate zero samples found
    if (rawSamples.empty() || !rawSamples[0])
    {
        return carb::cpp17::nullopt;
    }
    if (rawSamples.size() == 1)
    {
        // Only one frame sampled: the second member of the pair stays empty.
        std::pair<carb::cpp17::optional<T>, carb::cpp17::optional<T>> result(*rawSamples[0], carb::cpp17::nullopt);
        return result;
    }
    else if ((rawSamples.size() == 2) && rawSamples[1])
    {
        std::pair<carb::cpp17::optional<T>, carb::cpp17::optional<T>> result(*rawSamples[0], *rawSamples[1]);
        return result;
    }
    return carb::cpp17::nullopt;
}

// Writes the cache to 'file' using the caller-provided working buffer; the uint64_t result
// comes straight from the interface (presumably bytes written — confirm against IStageAtTimeInterval).
inline uint64_t StageAtTimeInterval::writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const
{
    return sIStageAtTimeInterval()->writeCacheToDisk(m_stageAtTimeInterval, file, workingBuffer, workingBufferSize);
}

// Increments the interval's reference count.
inline void StageAtTimeInterval::addRefCount()
{
    return sIStageAtTimeInterval()->addRefCount(m_stageAtTimeInterval);
}

// Decrements the reference count; the bool result comes straight from the interface.
inline bool StageAtTimeInterval::removeRefCount()
{
    return sIStageAtTimeInterval()->removeRefCount(m_stageAtTimeInterval);
}

// Current reference count.
inline unsigned int StageAtTimeInterval::getRefCount()
{
    return sIStageAtTimeInterval()->getRefCount(m_stageAtTimeInterval);
}

// StageAtTime implementation starts here
// This is defined here rather than in Carbonite plugin to allow use of templates
// and inlining

// Returns which mirrors currently hold valid data for the attribute (forwarded to the history window).
inline ValidMirrors StageAtTime::getAttributeValidBits(const PathC& path, const TokenC& attrName) const
{
    return m_historyWindow.getAttributeValidBits(path, attrName);
}

// The method reports interpolatable data types, and is specialized as optional<pair<optional<T>,optional<T>
// in order to report non-interpolatable data types as encountered in either or both samples
template <typename T>
inline optional<T> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const
{
    auto rawSamples = m_historyWindow.getAttributeRd<T>(path, attrName);
    // Communicate zero samples found
    if (rawSamples.size() == 0)
    {
        return carb::cpp17::nullopt;
    }
    // Linear interpolation supports at most two samples
    CARB_ASSERT(rawSamples.size() <= 2);
    if (rawSamples.size() == 1)
    {
        CARB_ASSERT(rawSamples[0]);
        return *rawSamples[0];
    }
    else if (rawSamples.size() == 2)
    {
        CARB_ASSERT(rawSamples[0]);
        CARB_ASSERT(rawSamples[1]);
        // Calculate linear approximation of f(time)
        T a_f = *rawSamples[0];
        T b_f = *rawSamples[1];
        return interpolate(a_f, b_f, (float)m_theta);
    }
    return carb::cpp17::nullopt;
}

// The following functions are marked for deletion since the specified types cannot be interpolated
// StageAtTime reports the non-interpolatable types read from Flatcache as a pair<optional<T>, optional<T>>
template <>
inline optional<bool> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<int> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<unsigned int> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<unsigned char> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<int64_t> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<uint64_t> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;
template <>
inline optional<carb::flatcache::Token> StageAtTime::getAttributeRd(const Path& path, const Token& attrName) const = delete;

// Specialize StageAtTime::getAttributeRd for non-interpolatable types: bool, int, uint
// In these cases the returned type will be a pair of values from the samples found, or nullopt otherwise
template <>
inline optional<std::pair<optional<bool>, optional<bool>>> StageAtTime::getAttributeRd(const Path& path,
                                                                                       const Token& attrName) const
{
    auto result = getNonInterpolatableAttributeRd<bool>(path, attrName);
    return result;
}
template <>
inline optional<std::pair<optional<int>, optional<int>>> StageAtTime::getAttributeRd(const Path& path,
                                                                                     const Token& attrName) const
{
    return getNonInterpolatableAttributeRd<int>(path, attrName);
}
template <>
inline optional<std::pair<optional<unsigned int>, optional<unsigned int>>> StageAtTime::getAttributeRd(
    const Path& path, const Token& attrName) const
{
    return getNonInterpolatableAttributeRd<unsigned int>(path, attrName);
}
template <>
inline optional<std::pair<optional<unsigned char>, optional<unsigned char>>> StageAtTime::getAttributeRd(
    const Path& path, const Token& attrName) const
{
    return getNonInterpolatableAttributeRd<unsigned char>(path, attrName);
}
template <>
inline optional<std::pair<optional<int64_t>, optional<int64_t>>> StageAtTime::getAttributeRd(const Path& path,
                                                                                             const Token& attrName) const
{
    return getNonInterpolatableAttributeRd<int64_t>(path, attrName);
}
template <>
inline optional<std::pair<optional<uint64_t>, optional<uint64_t>>> StageAtTime::getAttributeRd(const Path& path,
                                                                                               const Token& attrName) const
{
    return getNonInterpolatableAttributeRd<uint64_t>(path, attrName);
}
template <>
inline optional<std::pair<optional<carb::flatcache::Token>, optional<carb::flatcache::Token>>> StageAtTime::getAttributeRd(
    const Path& path, const Token& attrName) const
{
    return getNonInterpolatableAttributeRd<carb::flatcache::Token>(path, attrName);
}

// GPU-side single-attribute read. No interpolation support yet: with two
// samples the first is returned and a warning is logged once.
// NOTE(review): the warning text mentions "array attributes" although this is
// the non-array GPU read — looks copy-pasted; confirm intended wording.
template <typename T>
const T* StageAtTime::getAttributeRdGpu(const Path& path, const Token& attrName) const
{
    auto rawSamples = m_historyWindow.getAttributeRdGpu<T>(path, attrName);
    std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
    CARB_ASSERT(rawSamples.size() == sampleTimes.size());
    // This API doesn't have a way to communicate zero samples found
    CARB_ASSERT(rawSamples.size() != 0);
    // Linear interpolation supports at most two samples
    CARB_ASSERT(rawSamples.size() <= 2);
    if (rawSamples.size() == 1)
    {
        CARB_ASSERT(rawSamples[0]);
        return rawSamples[0];
    }
    else if (rawSamples.size() == 2)
    {
        // For GPU types there is no support for interpolation yet
        // Return first sample value instead for now
        CARB_LOG_WARN_ONCE("Support for interpolation of array attributes is not supported yet, returning first time sample instead!");
        CARB_ASSERT(rawSamples[0]);
        return rawSamples[0];
    }
    return nullptr;
}

// Element count of an array attribute. With two samples the first frame's
// count is returned (no interpolation of sizes).
inline size_t StageAtTime::getArrayAttributeSize(const Path& path, const Token& attrName) const
{
    auto rawSamples = m_historyWindow.getArrayAttributeSize(path, attrName);
    std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
    CARB_ASSERT(rawSamples.size() == sampleTimes.size());
    // This API doesn't have a way to communicate zero samples found
    CARB_ASSERT(rawSamples.size() != 0);
    // Linear interpolation supports at most two samples
    CARB_ASSERT(rawSamples.size() <= 2);
    if (rawSamples.size() == 1)
    {
        return rawSamples[0];
    }
    else if (rawSamples.size() == 2)
    {
        // For GPU types there is no support for interpolation yet
        // Return first sample value instead for now
        return rawSamples[0];
    }
    return 0;
}

// Read an array attribute. No interpolation support for arrays yet: with two
// samples the first is returned and a warning is logged once.
template <typename T>
inline gsl::span<const T> StageAtTime::getArrayAttributeRd(const Path& path, const Token& attrName)
{
    auto rawSamples = m_historyWindow.getArrayAttributeRd<T>(path, attrName);
    std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
    CARB_ASSERT(rawSamples.size() == sampleTimes.size());
    // This API doesn't have a way to communicate zero samples found
    CARB_ASSERT(rawSamples.size() != 0);
    // Linear interpolation supports at most two samples
    CARB_ASSERT(rawSamples.size() <= 2);
    if (rawSamples.size() == 1)
    {
        return rawSamples[0];
    }
    else if (rawSamples.size() == 2)
    {
        // For array types there is no support for interpolation yet
        // Return first sample value instead for now
        CARB_LOG_WARN_ONCE("Support for interpolation of array attributes is not supported yet, returning first time sample instead!");
        return rawSamples[0];
    }
    return gsl::span<const T>();
}

/**
 * @brief Auxiliary function used by AttributeArrayResult<T> and AttributeArrayResult<std::vector<T>>
 *
 * @details Used to assess if a prim is present in both of the sampled frames.
 * On success, pos_f0/pos_f1 receive the prim's index in frame 0 and frame 1.
 */
inline bool checkPathCorrespondence(std::vector<gsl::span<const carb::flatcache::Path>> paths,
                                    size_t index,
                                    size_t& pos_f0,
                                    size_t& pos_f1)
{
    if (paths.size() > 1)
    {
        // in the common case, the prim exists in both frames
        if ((index < paths[1].size()) && (paths[0][index] == paths[1][index]))
        {
            pos_f0 = pos_f1 = index;
            return true;
        }
        // otherwise linear-search frame 1 for the path found at `index` in frame 0
        auto pathIt = std::find(paths[1].begin(), paths[1].end(), paths[0][index]);
        if (pathIt != paths[1].end())
        {
            pos_f0 = index; // TODO: this isn't needed, can infer it
            pos_f1 = std::distance(paths[1].begin(), pathIt);
            return true;
        }
    }
    return false;
}

/**
 * @brief Returned by StageAtTime.getAttributeArrayRd
 *
 * @details Holds at most two samples (one from frame n, and one from frame n+1)
 * checkPathCorrespondence verifies if the path in frame n exists in frame n+1
 * If no corresponding path exists, the value will be returned and not interpolated
 */
template <typename T>
class AttributeArrayResult
{
public:
    // Number of values in the frame-0 sample.
    size_t size() const
    {
        return m_samples[0].size();
    }
    bool empty() const
    {
        return (size() == 0);
    }
    std::vector<gsl::span<const T>> const* data() const
    {
        return &m_samples;
    }
    std::vector<gsl::span<const T>>* data()
    {
        return &m_samples;
    }
    // Interpolated access: with two frames the values are blended by m_theta
    // when the prim exists in both frames; otherwise the frame-0 value is
    // returned as-is. Out-of-bounds access warns once and returns T().
    T operator[](const size_t valueIndex) const
    {
        {
            if (valueIndex >= m_samples[0].size() || m_samples[0].empty())
            {
                CARB_LOG_WARN_ONCE("AttributeArrayResult[] out of bounds");
                return T();
            }
            if (m_samples.size() == 1)
            {
                return m_samples[0][valueIndex];
            }
            else if (m_samples.size() == 2)
            {
                size_t pos0, pos1;
                if (checkPathCorrespondence(m_paths, valueIndex, pos0, pos1))
                {
                    T a = (m_samples[0][pos0]);
                    T b = (m_samples[1][pos1]);
                    T result = interpolate<T>(a, b, m_theta);
                    return result;
                }
                return m_samples[0][valueIndex];
            }
        }
        return T();
    };

    std::vector<gsl::span<const carb::flatcache::Path>> m_paths; // per-frame path arrays
    std::vector<gsl::span<const T>> m_samples;                   // per-frame value arrays (1 or 2 frames)
    float m_theta;                                               // interpolation parameter in [0, 1]
};

/**
 * @brief Returned by StageAtTime.getArrayAttributeArrayRd
 *
 * @details Enables access to a vector of readily interpolated attribute values
 */
template <typename T>
class AttributeArrayResult<std::vector<T>>
{
public:
    size_t size() const
    {
        return m_samples[0].size();
    }
    bool empty() const
    {
        return (size() == 0);
    }
    // NOTE(review): these data() overloads return m_samples by value/reference
    // where the declared return type is a pointer (the primary template
    // returns &m_samples) — this would fail to compile if instantiated; confirm.
    std::vector<std::vector<gsl::span<const T>>> const* data() const
    {
        return m_samples;
    }
    std::vector<std::vector<gsl::span<const T>>>* data()
    {
        return m_samples;
    }
    // Returns the (element-wise interpolated) array value of prim `primIndex`.
    std::vector<T> operator[](const size_t primIndex)
    {
        std::vector<T> interpolatedAttributeValues;
        if (m_samples.size() == 1)
        {
            interpolatedAttributeValues.resize(m_samples[0][primIndex].size());
            std::copy(m_samples[0][primIndex].begin(), m_samples[0][primIndex].end(), interpolatedAttributeValues.begin());
            return interpolatedAttributeValues;
        }
        else if (m_samples.size() == 2)
        {
            size_t pos0, pos1;
            if (checkPathCorrespondence(m_paths, primIndex, pos0, pos1))
            {
                // NOTE(review): frame-1 values are indexed with primIndex, not
                // pos1, unlike the scalar AttributeArrayResult above — confirm
                // whether pos1 should be used when the prim moved between frames.
                auto values_f0 = m_samples[0][primIndex];
                auto values_f1 = m_samples[1][primIndex];
                interpolatedAttributeValues.reserve(values_f0.size());
                // interpolate attrib values for the requested {prim index : attrib val index}
                for (size_t valueIndex = 0; valueIndex < values_f0.size(); ++valueIndex)
                {
                    T a = (values_f0[valueIndex]);
                    T b = (values_f1[valueIndex]);
                    T result = interpolate<T>(a, b, m_theta);
                    interpolatedAttributeValues.emplace_back(result);
                }
                return interpolatedAttributeValues;
            }
            // No correspondence: fall back to the frame-0 value, uninterpolated.
            interpolatedAttributeValues.resize(m_samples[0][primIndex].size());
            std::copy(m_samples[0][primIndex].begin(), m_samples[0][primIndex].end(), interpolatedAttributeValues.begin());
            return interpolatedAttributeValues;
        }
        return std::vector<T>();
    }

    std::vector<gsl::span<const carb::flatcache::Path>> m_paths;
    std::vector<std::vector<gsl::span<const T>>> m_samples;
    float m_theta;
};

// CPU bulk read of one attribute across a bucket; the result interpolates lazily on access.
template <typename T>
AttributeArrayResult<T> StageAtTime::getAttributeArrayRd(const PrimBucketList& primBucketList,
                                                         size_t primBucketListIndex,
                                                         const Token& attrName) const
{
    size_t sampleCount = m_historyWindow.getTimeSampleCount();
    if (sampleCount > 0)
    {
        AttributeArrayResult<T> arrAttRes;
        arrAttRes.m_samples = m_historyWindow.getAttributeArrayRd<T>(primBucketList, primBucketListIndex, attrName);
        arrAttRes.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
        arrAttRes.m_theta = (float)m_theta;
        return arrAttRes;
    }
    else
    {
        CARB_LOG_WARN_ONCE(
            "getAttributeArrayRd %s: Data not available at time, possible dropped frame", attrName.getText());
        return AttributeArrayResult<T>();
    }
}

// GPU counterpart of getAttributeArrayRd.
template <typename T>
AttributeArrayResult<T> StageAtTime::getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
                                                            size_t primBucketListIndex,
                                                            const Token& attrName) const
{
    size_t sampleCount = m_historyWindow.getTimeSampleCount();
    if (sampleCount > 0)
    {
        AttributeArrayResult<T> arrAttRes;
        arrAttRes.m_samples = m_historyWindow.getAttributeArrayRdGpu<T>(primBucketList, primBucketListIndex, attrName);
        arrAttRes.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
        arrAttRes.m_theta = (float)m_theta;
        return arrAttRes;
    }
    else
    {
        CARB_LOG_WARN_ONCE(
            "getAttributeArrayRdGpu %s: Data not available at time, possible dropped frame", attrName.getText());
        return AttributeArrayResult<T>();
    }
}

// Raw (untyped, per-frame) byte views of the attribute array — no interpolation.
inline std::vector<gsl::span<const char>> StageAtTime::getAttributeArrayRawRd(const PrimBucketList& primBucketList,
                                                                              size_t primBucketListIndex,
                                                                              const Token& attrName) const
{
    return m_historyWindow.getAttributeArrayRawRd(primBucketList, primBucketListIndex, attrName);
}

// Bulk read of an array-valued attribute across a bucket.
// NOTE(review): the warning text says "getAttributeArrayRd" — likely a
// copy-paste; confirm it should read "getArrayAttributeArrayRd".
template <typename T>
AttributeArrayResult<std::vector<T>> StageAtTime::getArrayAttributeArrayRd(
    const PrimBucketList& primBucketList, size_t primBucketListIndex, const Token& attrName) const
{
    size_t sampleCount = m_historyWindow.getTimeSampleCount();
    AttributeArrayResult<std::vector<T>> result;
    if (sampleCount > 0)
    {
        result.m_samples = m_historyWindow.getArrayAttributeArrayRd<T>(primBucketList, primBucketListIndex, attrName);
        result.m_paths = m_historyWindow.getPathArray(primBucketList, primBucketListIndex);
        result.m_theta = (float)m_theta;
        return result;
    }
    else
    {
        CARB_LOG_WARN_ONCE(
            "getAttributeArrayRd %s: Data not available at time, possible dropped frame", attrName.getText());
        return AttributeArrayResult<std::vector<T>>();
    }
}

// Path array of the bucket. With two samples the frame-0 paths are returned
// (see TODO: not yet correct when prims are added/deleted between frames).
inline gsl::span<const Path> StageAtTime::getPathArray(const PrimBucketList& primBucketList,
                                                       size_t primBucketListIndex) const
{
    size_t sampleCount = m_historyWindow.getTimeSampleCount();
    if (sampleCount == 1)
    {
        return m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
    }
    else if (sampleCount == 0)
    {
        CARB_LOG_WARN_ONCE("getPathArray: Data not available at time, possible dropped frame");
        return gsl::span<const Path>();
    }
    else if (sampleCount == 2)
    {
        // TODO: make this correct when prims are being added and deleted
        // To do this we need to make a new array out:
        // out[i] = in0[i] , if in0[i] == in1[i]
        //        = kUninitializedPath, otherwise
        return m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
#if 0
        gsl::span<const Path> in0 = m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[0];
        gsl::span<const Path> in1 = m_historyWindow.getPathArray(primBucketList, primBucketListIndex)[1];
        std::vector<Path> multiframePaths;
        for (size_t i = 0; i < in0.size(); ++i)
            in0[i] == in1[i] ? multiframePaths.emplace_back(in0[i]) :
                               multiframePaths.emplace_back(flatcache::kUninitializedPath);
        return multiframePaths;
#endif
    }
    return gsl::span<const Path>();
}

// Forwarded connection read (no interpolation semantics for connections).
inline std::vector<const Connection*> StageAtTime::getConnectionRd(const Path& path, const Token& connectionName)
{
    return m_historyWindow.getConnectionRd(path, connectionName);
}

// Debug helper: print the names of all buckets in the window.
inline void StageAtTime::printBucketNames() const
{
    m_historyWindow.printBucketNames();
}

// Number of attributes on the bucket. With two sampled frames, only
// attributes present (with matching type) in both frames are counted.
inline size_t StageAtTime::getAttributeCount(const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
    std::vector<size_t> counts = m_historyWindow.getAttributeCounts(primBucketList, primBucketListIndex);
    if (counts.size() == 1)
    {
        return counts[0];
    }
    // Perform a set intersection to get a valid count size;
    if (counts.size() == 2)
    {
        //
        // TODO: The attributes are internally sorted vectors, see flatcache::set.
        // Ideally we'd make a C-ABI type that makes it clear that these are sorted,
        // wrap with flatcache::set in the C++ wrapper and then use the standard library set intersection.
        //
        auto namesAndTypes = m_historyWindow.getAttributeNamesAndTypes(primBucketList, primBucketListIndex);
        const std::vector<std::vector<Token>>& names = namesAndTypes.first;
        const std::vector<std::vector<Type>>& types = namesAndTypes.second;
        std::vector<Token> intersection;
        // Perform a set intersection but we need to track the types as we intersect
        const std::vector<Token>& workingNames = names[0].size() < names[1].size() ? names[0] : names[1];
        const std::vector<Type>& workingTypes = names[0].size() < names[1].size() ? types[0] : types[1];
        const std::vector<Token>& testingNames = names[0].size() < names[1].size() ? names[1] : names[0];
        const std::vector<Type>& testingTypes = names[0].size() < names[1].size() ? types[1] : types[0];
        // Since attribute vectors are sorted we can track last spotted locations to be more efficient.
        size_t last = 0;
        for (size_t i = 0; i < workingNames.size(); ++i)
        {
            for (size_t j = last; j < testingNames.size(); ++j)
            {
                if (workingNames[i] == testingNames[j])
                {
                    if (workingTypes[i] == testingTypes[j])
                    {
                        intersection.push_back(workingNames[i]);
                    }
                    // Store hit location to start next search
                    // NOTE(review): resuming at j re-tests the element just
                    // matched; j + 1 would be valid if names are unique — confirm.
                    last = j;
                    break;
                }
            }
        }
        return intersection.size();
    }
    return 0;
}

// Names and types of the bucket's attributes. With two sampled frames, only
// attributes present (with matching type) in both frames are reported.
inline std::pair<std::vector<Token>, std::vector<Type>> StageAtTime::getAttributeNamesAndTypes(
    const PrimBucketList& primBucketList, size_t primBucketListIndex) const
{
    std::vector<Token> outNames;
    std::vector<Type> outTypes;
    std::vector<std::vector<Token>> names;
    std::vector<std::vector<Type>> types;
    std::tie(names, types) = m_historyWindow.getAttributeNamesAndTypes(primBucketList, primBucketListIndex);
    if (names.size() == 1)
    {
        outNames = std::move(names[0]);
        outTypes = std::move(types[0]);
    }
    if (names.size() == 2)
    {
        // Assuming that the invariant that names and types of the same slot are the same count holds.
        outNames.reserve(std::min(names[0].size(), names[1].size()));
        outTypes.reserve(std::min(types[0].size(), types[1].size()));
        // Perform a set intersection but we need to track the types as we intersect
        std::vector<Token>& workingNames = names[0].size() < names[1].size() ? names[0] : names[1];
        std::vector<Type>& workingTypes = names[0].size() < names[1].size() ? types[0] : types[1];
        std::vector<Token>& testingNames = names[0].size() < names[1].size() ? names[1] : names[0];
        std::vector<Type>& testingTypes = names[0].size() < names[1].size() ? types[1] : types[0];
        // Since attribute vectors are sorted we can track last spotted locations to be more efficient.
        size_t last = 0;
        for (size_t i = 0; i < workingNames.size(); ++i)
        {
            for (size_t j = last; j < testingNames.size(); ++j)
            {
                if (workingNames[i] == testingNames[j])
                {
                    if (workingTypes[i] == testingTypes[j])
                    {
                        outNames.push_back(workingNames[i]);
                        outTypes.push_back(workingTypes[i]);
                    }
                    // Store hit location to start next search
                    last = j;
                    break;
                }
            }
        }
    }
    return { outNames, outTypes };
}

// Serialize to disk; only valid for a single (non-interpolated) sample.
inline uint64_t StageAtTime::writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const
{
    size_t sampleCount = m_historyWindow.getTimeSampleCount();
    if (sampleCount != 1)
    {
        CARB_LOG_ERROR_ONCE("Can't call StageAtTime::WriteCacheToDisk for interpolated values");
        return 0;
    }
    return m_historyWindow.writeCacheToDisk(file, workingBuffer, workingBufferSize);
}

// Refcount forwarding to the underlying history window.
inline void StageAtTime::addRefCount()
{
    m_historyWindow.addRefCount();
}

inline bool StageAtTime::removeRefCount()
{
    return m_historyWindow.removeRefCount();
}

inline unsigned int StageAtTime::getRefCount()
{
    return m_historyWindow.getRefCount();
}

// StageWithHistory implementation starts here

// Creates the history ring buffer for the given USD stage via the Carbonite interface.
inline StageWithHistory::StageWithHistory(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda)
{
    auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
    m_stageWithHistory = iStageWithHistory->create2(usdStageId, historyFrameCount, simPeriod, withCuda);
    m_usdStageId = usdStageId;
}

inline StageWithHistory::~StageWithHistory()
{
    auto iStageWithHistory = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
    iStageWithHistory->destroy(m_usdStageId);
}

// Registers a change listener and returns its id.
inline ListenerId StageWithHistory::createListener()
{
    auto iChangeTrackerConfig = carb::getCachedInterface<carb::flatcache::IStageWithHistory>();
    ListenerId newId = iChangeTrackerConfig->createListener();
    return newId;
}

// Templated methods do not get compiled unless they are instantiated.
// The following code is not intended to be executed, it just instantiates each // templated method once to make sure that they compile. inline void instantiationTest(StageInProgress& stage, StageAtTimeInterval& stageAtInterval, StageAtTime& stageAtTime, const Path& path, const Token& attrName) { int* x0 = stage.getAttribute<int>(path, attrName); CARB_UNUSED(x0); const int* x1 = stage.getAttributeRd<int>(path, attrName); CARB_UNUSED(x1); int* x2 = stage.getAttributeWr<int>(path, attrName); CARB_UNUSED(x2); gsl::span<int> x3 = stage.getArrayAttribute<int>(path, attrName); CARB_UNUSED(x3); gsl::span<const int> x4 = stage.getArrayAttributeRd<int>(path, attrName); CARB_UNUSED(x4); gsl::span<int> x5 = stage.getArrayAttributeWr<int>(path, attrName); CARB_UNUSED(x5); PrimBucketList pbl = stage.findPrims({}, {}, {}); gsl::span<int> x6 = stage.getAttributeArray<int>(pbl, 0, attrName); CARB_UNUSED(x6); std::vector<const int*> x7 = stageAtInterval.getAttributeRd<int>(path, attrName); CARB_UNUSED(x7); std::vector<gsl::span<const int>> x8 = stageAtInterval.getAttributeArrayRd<int>(pbl, 0, attrName); CARB_UNUSED(x8); optional<float> x9 = stageAtTime.getAttributeRd<float>(path, attrName); CARB_UNUSED(x9); optional<std::pair<optional<int>, optional<int>>> x10 = stageAtTime.getAttributeRd <std::pair<optional<int>, optional<int>>>(path, attrName); CARB_UNUSED(x10); carb::flatcache::AttributeArrayResult<int> x11 = stageAtTime.getAttributeArrayRd<int>(pbl, 0, attrName); CARB_UNUSED(x11); carb::flatcache::AttributeArrayResult<std::vector<int>> x12 = stageAtTime.getArrayAttributeArrayRd<int>(pbl, 0, attrName); CARB_UNUSED(x12); } } // namespace flatcache } // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/IPath.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/Framework.h> #include <carb/Interface.h> #include <carb/flatcache/IToken.h> #include <carb/flatcache/Intrinsics.h> #include <functional> // Set to empty macro when IPath::iPath static member is removed #define FLATCACHE_IPATH_INIT \ const carb::flatcache::IPath* carb::flatcache::Path::iPath = nullptr; namespace carb { namespace flatcache { // PathC are integer keys that identify paths to C-ABI interfaces struct PathC { uint64_t path; constexpr bool operator<(const PathC& other) const { return path < other.path; } constexpr bool operator==(const PathC& other) const { return path == other.path; } constexpr bool operator!=(const PathC& other) const { return path != other.path; } }; static_assert(std::is_standard_layout<PathC>::value, "Struct must be standard layout as it is used in C-ABI interfaces"); // We don't reference count the uninitialized (or empty) path, and we use // this fact to avoid unnecessary dll calls to addRef()/removeRef(), for // example during std::vector resize. To do this we need to check whether a // path is uninitialized without the dll call getEmptyPath(), so we store // its value here in a constant. // We run automated test "IPath::getEmptyPath() dll call can be replaced with // constant, Path::kUninitializedPath" to ensure that this constant never // changes. 
static constexpr PathC kUninitializedPath{0};

// C-ABI interface to pxr::SdfPath
struct IPath
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::IPath", 0, 1);

    PathC (*getHandle)(const char* name);    // intern a path string, returns a refcounted handle
    const char* (*getText)(PathC handle);    // string form of a handle
    PathC (*getParent)(PathC handle);
    PathC (*appendChild)(PathC handle, TokenC childName);
    void (*addRef)(PathC handle);
    void (*removeRef)(PathC handle);
    PathC (*getEmptyPath)();
    // Creates a path by appending a given relative path to this path.
    PathC (*appendPath)(PathC handle, PathC path);
    // Returns the number of path elements in this path.
    uint32_t (*getPathElementCount)(PathC handle);
};

// C++ wrapper for IPath
// RAII handle that reference-counts the underlying interned path. The
// uninitialized handle (kUninitializedPath) is deliberately never refcounted
// to avoid dll calls for empty temporaries (see comment above).
class Path
{
    static carb::flatcache::IPath& sIPath();

public:
    // DEPRECATED: keeping for binary compatibility
    // Will be removed in October 2021 - @TODO set FLATCACHE_IPATH_INIT to empty macro when removed!
    // Still safe to use if initialized in a given dll
    static const carb::flatcache::IPath* iPath;

    Path() : mHandle(kUninitializedPath)
    {
    }
    Path(const char* path)
    {
        mHandle = sIPath().getHandle(path);
    }
    // Needs to be noexcept for std::vector::resize() to move instead of copy
    ~Path() noexcept
    {
        // We see the compiler construct and destruct many uninitialized
        // temporaries, for example when resizing std::vector.
        // We don't want to do an IPath dll call for these, so skip if handle
        // is uninitialized.
        if (mHandle != kUninitializedPath)
        {
            sIPath().removeRef(mHandle);
        }
    }
    // Copy constructor
    Path(const Path& other) : mHandle(other.mHandle)
    {
        if (mHandle != kUninitializedPath)
        {
            sIPath().addRef(mHandle);
        }
    }
    // Copy construct from integer
    Path(PathC handle) : mHandle(handle)
    {
        if (mHandle != kUninitializedPath)
        {
            sIPath().addRef(mHandle);
        }
    }
    // Move constructor
    // Needs to be noexcept for std::vector::resize() to move instead of copy
    Path(Path&& other) noexcept
    {
        // We are moving the src handle so don't need to change its refcount
        mHandle = other.mHandle;
        // Make source invalid
        other.mHandle = kUninitializedPath;
    }
    // Copy assignment
    Path& operator=(const Path& other)
    {
        if (this != &other)
        {
            // Release the current handle, then take a reference on the new one.
            if (mHandle != kUninitializedPath)
            {
                sIPath().removeRef(mHandle);
            }
            if (other.mHandle != kUninitializedPath)
            {
                sIPath().addRef(other.mHandle);
            }
        }
        // Self-assignment falls through harmlessly: mHandle == other.mHandle.
        mHandle = other.mHandle;
        return *this;
    }
    // Move assignment
    Path& operator=(Path&& other) noexcept
    {
        if (&other == this)
            return *this;
        // We are about to overwrite the dest handle, so decrease its refcount
        if (mHandle != kUninitializedPath)
        {
            sIPath().removeRef(mHandle);
        }
        // We are moving the src handle so don't need to change its refcount
        mHandle = other.mHandle;
        other.mHandle = kUninitializedPath;
        return *this;
    }
    const char* getText() const
    {
        return sIPath().getText(mHandle);
    }
    // Comparisons are on the integer handle, not the path text.
    constexpr bool operator<(const Path& other) const
    {
        return mHandle < other.mHandle;
    }
    constexpr bool operator!=(const Path& other) const
    {
        return mHandle != other.mHandle;
    }
    constexpr bool operator==(const Path& other) const
    {
        return mHandle == other.mHandle;
    }
    constexpr operator PathC() const
    {
        return mHandle;
    }

private:
    PathC mHandle;
};
static_assert(std::is_standard_layout<Path>::value, "Path must be standard layout as it is used in C-ABI interfaces");

#ifndef __CUDACC__
inline carb::flatcache::IPath& Path::sIPath()
{
    // Acquire carbonite interface on first use
    carb::flatcache::IPath* iPath = carb::getCachedInterface<carb::flatcache::IPath>();
    CARB_ASSERT(iPath);
    return *iPath;
}
#endif // __CUDACC__

}
}

namespace std
{
// Hash support so PathC/Path can be used as unordered container keys.
template <>
class hash<carb::flatcache::PathC>
{
public:
    inline size_t operator()(const carb::flatcache::PathC& key) const
    {
        // lower 8 bits have no entropy, so just remove the useless bits
        return key.path >> 8;
    }
};
template <>
class hash<carb::flatcache::Path>
{
public:
    inline size_t operator()(const carb::flatcache::Path& key) const
    {
        return std::hash<carb::flatcache::PathC>()(carb::flatcache::PathC(key));
    }
};
}
omniverse-code/kit/fabric/include/carb/flatcache/FlatCacheUSD.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/flatcache/IPath.h>
#include <carb/logging/Log.h>

#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/path.h>

namespace carb
{
namespace flatcache
{

// asInt() is the same as SdfPath::_AsInt()
// Flatcache relies on asInt(a)==asInt(b) <=> a is same path as b,
// which is how SdfPath::operator== is currently defined.
// If USD changes sizeof(pxr::SdfPath), we will need to change PathC to make it
// the same size.
inline PathC asInt(const pxr::SdfPath& path)
{
    static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
    PathC ret;
    // memcpy (rather than a cast) keeps this free of strict-aliasing issues.
    std::memcpy(&ret, &path, sizeof(pxr::SdfPath));
    return ret;
}

// Pointer variant: reinterpret an SdfPath array in place (no copy).
inline const PathC* asInt(const pxr::SdfPath* path)
{
    static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
    return reinterpret_cast<const PathC*>(path);
}

inline TokenC asInt(const pxr::TfToken& token)
{
    static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
    TokenC ret;
    std::memcpy(&ret, &token, sizeof(pxr::TfToken));
    return ret;
}

inline const TokenC* asInt(const pxr::TfToken* token)
{
    static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
    return reinterpret_cast<const TokenC*>(token);
}

// Return reference to ensure that reference count doesn't change
inline const pxr::TfToken& intToToken(const TokenC& token)
{
    static_assert(sizeof(pxr::TfToken) == sizeof(TokenC), "Change TokenC to make the same size as pxr::TfToken");
    return reinterpret_cast<const pxr::TfToken&>(token);
}

inline const pxr::SdfPath& intToPath(const PathC& path)
{
    static_assert(sizeof(pxr::SdfPath) == sizeof(PathC), "Change PathC to make the same size as pxr::SdfPath");
    return reinterpret_cast<const pxr::SdfPath&>(path);
}

inline const pxr::SdfPath* intToPath(const Path* path)
{
    static_assert(sizeof(pxr::SdfPath) == sizeof(Path), "Change Path to make the same size as pxr::SdfPath");
    return reinterpret_cast<const pxr::SdfPath*>(path);
}

}
}
omniverse-code/kit/fabric/include/carb/flatcache/PrimChanges.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IFlatcache.h>
#include <carb/flatcache/IPath.h>

#include <gsl/span>

#include <cstddef>

namespace carb
{
namespace flatcache
{

// One attribute plus the set of prim indices for which it changed.
struct AttrAndChangedIndices
{
    AttrNameAndType attr;
    // For which prims did this attribute change?
    bool allIndicesChanged;                 // true => every prim in the bucket changed
    gsl::span<const size_t> changedIndices; // explicit indices (used when not all changed)
};

// C++ view over the C-ABI BucketChangesC: per-attribute change sets plus the
// bucket's path array. The spans alias memory owned by the C-ABI side.
struct BucketChanges
{
    // For each attribute, which prims changed?
    std::vector<AttrAndChangedIndices> attrChangedIndices;
    gsl::span<const Path> pathArray;

    BucketChanges() = default;

    BucketChanges(BucketChangesC in) : pathArray({ in.pathArray.ptr,in.pathArray.elementCount })
    {
        size_t count = in.changedIndices.elementCount;
        attrChangedIndices.resize(count);
        for (size_t i = 0; i != count; i++)
        {
            const ConstChangedIndicesC& inAttrChanges = in.changedIndices.ptr[i];
            attrChangedIndices[i].attr = in.changedAttributes.ptr[i];
            attrChangedIndices[i].allIndicesChanged = inAttrChanges.allIndicesChanged;
            attrChangedIndices[i].changedIndices =
                gsl::span<const size_t>(inAttrChanges.changedIndices.ptr, inAttrChanges.changedIndices.elementCount);
        }
    }
};

// C++ view over AddedPrimIndicesC: iterable set of bucket indices for prims
// added since the last change-tracking query.
class AddedPrimIndices
{
    // Which prims were added?
    gsl::span<const size_t> addedIndices;

public:
    AddedPrimIndices(AddedPrimIndicesC in)
    {
        addedIndices = gsl::span<const size_t>(in.addedIndices.ptr, in.addedIndices.elementCount);
    }

    size_t size() const
    {
        return addedIndices.size();
    }

    // This iterator first iterates over the deletedElements that were replaced
    // by new elements, then the contiguous range of elements added at the end
    // of the bucket
    // NOTE(review): `difference_type` is unsigned (size_t) and no `pointer`
    // typedef is provided; both deviate from the standard iterator
    // requirements (pre-C++20 iterator_traits) — confirm intended.
    struct iterator
    {
        using iterator_category = std::input_iterator_tag;
        using difference_type = size_t;
        using value_type = size_t;
        using reference = size_t;

        iterator(gsl::span<const size_t>::iterator _addedIndicesIterator,
                 gsl::span<const size_t>::iterator _addedIndicesEnd)
            : addedIndicesIterator(_addedIndicesIterator), addedIndicesEnd(_addedIndicesEnd)
        {
        }
        reference operator*() const
        {
            return *addedIndicesIterator;
        }
        iterator& operator++()
        {
            addedIndicesIterator++;
            return *this;
        }
        bool operator==(iterator other) const
        {
            return addedIndicesIterator == other.addedIndicesIterator;
        }
        bool operator!=(iterator other) const
        {
            return !(*this == other);
        }
        difference_type operator-(iterator other)
        {
            return addedIndicesIterator - other.addedIndicesIterator;
        }

    private:
        gsl::span<const size_t>::iterator addedIndicesIterator;
        // NOTE(review): stored but never read outside construction — confirm
        // whether it is kept for a planned bounds check or can be dropped.
        gsl::span<const size_t>::iterator addedIndicesEnd;
    };

    iterator begin()
    {
        return iterator(addedIndices.begin(), addedIndices.end());
    }
    iterator end()
    {
        return iterator(addedIndices.end(), addedIndices.end());
    }
};

}
}
omniverse-code/kit/fabric/include/carb/flatcache/Intrinsics.h
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <cstdint> #include <cstdlib> #include <cstddef> #include <carb/flatcache/Defines.h> #if USING( WINDOWS_BUILD ) #include <intrin.h> #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) // no linux-specific includes at this time #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) namespace carb { namespace flatcache { inline uint32_t clz32( const uint32_t x ) { #if USING( WINDOWS_BUILD ) DWORD z; return _BitScanReverse( &z, x ) ? 31 - z : 32; #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) return x ? __builtin_clz( x ) : 32; #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) } inline uint32_t clz64( const uint64_t x ) { #if USING( WINDOWS_BUILD ) DWORD z; return _BitScanReverse64( &z, x ) ? 63 - z : 64; #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) return x ? __builtin_clzll( x ) : 64; #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) } inline uint32_t ctz32( const uint32_t x ) { #if USING( WINDOWS_BUILD ) DWORD z; return _BitScanForward( &z, x ) ? z : 32; #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) return x ? __builtin_ctz( x ) : 32; #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) } inline uint32_t ctz64( const uint64_t x ) { #if USING( WINDOWS_BUILD ) DWORD z; return _BitScanForward64( &z, x ) ? z : 64; #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) return x ? 
__builtin_ctzll( x ) : 64; #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) } inline uint64_t bswap64( const uint64_t x ) { #if USING( WINDOWS_BUILD ) return _byteswap_uint64( x ); #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) return __builtin_bswap64 ( x ); #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) } inline uint64_t rotr64( const uint64_t value, const int shift ) { #if USING( WINDOWS_BUILD ) return _rotr64( value, shift ); #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) return (value >> shift) | (value << (64 - shift)); #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) } inline uint64_t rotl64( const uint64_t value, const int shift ) { #if USING( WINDOWS_BUILD ) return _rotl64( value, shift ); #elif USING( LINUX_BUILD ) // #if USING( WINDOWS_BUILD ) return (value << shift) | (value >> (64 - shift)); #else // #if USING( WINDOWS_BUILD ) #error "Unsupported platform" #endif // #if USING( WINDOWS_BUILD ) } } // namespace flatcache } // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/FlatCache.h
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/Interface.h>
#include <carb/flatcache/PathToAttributesMap.h>

namespace carb
{
namespace flatcache
{

// Callers of createCache() and getCache() can store anything they want in
// UserId. For example, OmniGraph uses it to store the OmniGraph pointer.
struct UserId
{
    uint64_t id;

    bool operator<(UserId b) const
    {
        return id < b.id;
    }
    bool operator==(UserId b) const
    {
        return id == b.id;
    }
    bool operator!=(UserId b) const
    {
        return id != b.id;
    }
};

constexpr UserId kDefaultUserId = { 0 };
constexpr UserId kInvalidUserId = { ~uint64_t(0) };

// Flatcache has the option to save a finite number of frames of history,
// organized as a ringbuffer. This is typically used to buffer data between
// simulation rendering. The simplest case, double buffering, allows simulation
// and rendering to run in parallel, each running for the full frame.
// Longer buffers can be used to feed one or more renderers running at
// different rates to simulation.
// To enable this feature, pass CacheType::eWithHistory to createCache(),
// otherwise pass CacheType::eWithoutHistory.
// Multiple caches can be created for each UsdStageId, but at most one can have
// history.
enum class CacheType
{
    eWithHistory,
    eWithoutHistory,
    eWithoutHistoryAndWithCuda,
    eWithHistoryAndCuda
};

// Carbonite plugin interface for flatcache: a table of C-ABI function
// pointers filled in by the implementation plugin.
struct FlatCache
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::FlatCache", 0, 4);

    // Abstractly, a flatcache maps USD paths to USD attributes, just like a
    // UsdStage does.
    // Concretely we represent a flatcache by objects of type PathToAttributesMap.
    // This method creates a PathToAttributesMap for a given stage, but doesn't
    // populate it with values. This allows the cache to be filled lazily as
    // values are needed.
    // Instead, it traverses the given Usd stage making an index of paths to
    // attributes.
    // The cache uses the index to organize data into contiguous arrays,
    // and also allows you to find prims by type and/or attribute without
    // traversing the stage.
    // This method also specifies the stage to be used by calls to usdToCache()
    // and cacheToUsd().
    PathToAttributesMap&(CARB_ABI* createCache)(UsdStageId usdStageId, UserId userId, CacheType cacheType);

    // Import a single prim (attributes filtered by 'filter') into the cache,
    // overwriting any data already present for it.
    void(CARB_ABI* addPrimToCache)(PathToAttributesMap& cache, const pxr::UsdPrim& prim, const std::set<TokenC>& filter);

    // Destroy the cache associated with the given stage.
    void(CARB_ABI* destroyCache)(UsdStageId usdStageId, UserId userId);

    // Prefetch the whole USD stage to the cache
    // Typically you only call this at stage load time, because the USD notify
    // handler updates the cache if the stage changes.
    void(CARB_ABI* usdToCache)(PathToAttributesMap& cache);

    // Write back all dirty cached data to the USD stage.
    // If your renderer doesn't use the cache then you need to do this
    // before rendering.
    void(CARB_ABI* cacheToUsd)(PathToAttributesMap& cache);

    // Write back only one bucket to usd
    void(CARB_ABI* cacheBucketToUsd)(PathToAttributesMap& cache, BucketId bucketId, bool skipMeshPoints);

    // Convert between USD value types and flatcache's TypeC handles.
    TypeC(CARB_ABI* usdTypeToTypeC)(pxr::SdfValueTypeName usdType);

    // Look up an existing cache; see createCache() for the key semantics.
    PathToAttributesMap*(CARB_ABI* getCache)(UsdStageId usdStageId, UserId userId);

    pxr::SdfValueTypeName(CARB_ABI* typeCtoUsdType)(TypeC typeC);

    // Enumerate the USD value types known to the converter above.
    size_t(CARB_ABI* getUsdTypeCount)();
    void(CARB_ABI* getAllUsdTypes)(TypeC* outArray, size_t outArraySize);

    /** @brief Import a prim in cache, keeping any data already cached for it */
    void(CARB_ABI* addPrimToCacheNoOverwrite)(PathToAttributesMap& cache, const pxr::UsdPrim& prim, const std::set<TokenC>& filter);

    void(CARB_ABI* initStaticVariables)();

    // Export the cache contents to the given USD stage, optionally at a
    // specific timeCode (prevTimeCode supports velocity-style exports --
    // TODO(review): confirm against the implementation).
    void(CARB_ABI* exportUsd)(PathToAttributesMap& cache, pxr::UsdStageRefPtr usdStage, const double* timeCode, const double* prevTimeCode);

    /** @brief Attempt to serialize the cache into the specified buffer.
     *
     * @cache[in] The cache to serialize
     * @dest[in/out] Pointer to buffer to be written to, will start writing to head
     *               of pointer. dest will be left pointing to the point after the last write
     * @destSize Size of buffer that was allocated for the data (in bytes)
     * @pathStringCache Cache of path strings; looking strings up repeatedly is slow,
     *                  so it pays off across repeated calls
     *
     * @return Number of bytes written; success is determined by (return <= @destSize)
     *
     * @invariant It is safe to write to any memory within [dest, dest+size] for the
     * duration of the function call.
     *
     * @note If the cache will not fit into the size of memory allocated in
     * @dest then it will stop writing, but continue to run the serialize
     * algorithm to calculate the actual amount of data that needs to be
     * written
     *
     * @Todo : make cache const - not possible because serializeMirroredArray is not
     * const, however, that is because getArraySpanC is used which also doesn't
     * have a const version, so that needs to be addressed first; this is because
     * in the call stack we end up with a copy from GPU -> CPU which would need to
     * be avoided
     */
    uint64_t(CARB_ABI* serializeCache)(PathToAttributesMap& cache, uint8_t* dest, size_t destSize, SerializationCache& pathStringCache);

    /** @brief Given a buffer that has the serialized version of a cache written
     * using the serialize function, this function will override all the data
     * in the cache with the data from the buffer
     *
     * @destStage[in/out] Reference to the cache to be populated
     * @pathCache[in/out] Looking up SDFPath via string can be expensive so it
     *                    is worthwhile to cache this data across many repeated
     *                    calls.
     * @input[in] Pointer to buffer of data containing serialized cache
     * @inputSize[in] Size of data in the buffer
     * @skipStageConfirmation[in] Whether we should skip making sure the destination stage is open.
     *
     * @return True if buffer was successfully de-serialized
     *
     * @note : this currently has to clear the cache before it is populated which is a possibly
     * expensive operation
     *
     * @TODO: should we care that it came from the same version of the USD file?
     */
    bool(CARB_ABI* deserializeCache)(
        PathToAttributesMap& destStage, DeserializationCache& pathCache, const uint8_t* input, const size_t inputSize, bool skipStageConfirmation);

    /** @brief Write a cache file to disk at a specified location
     *
     * @note many parameters to this function are optional
     * @cache[in] That cache to be written to disk
     * @file[in] The location the file is desired to be written to
     * @workingBuffer[in] [Optional] In order to avoid costly reallocations
     *                    the code will attempt to re-use the memory at the buffer
     *                    location if it is large enough. If the buffer isn't large
     *                    enough the cost of allocation, and re-traversal may be paid
     * @workingBufferSize[in] [Optional] If workingBuffer is non null, then this describes the length
     *                    of the buffer
     * @return The amount of data needed to serialize the cache, a return value of 0 indicates an error
     */
    uint64_t(CARB_ABI* writeCacheToDisk)(PathToAttributesMap& cache, const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize);

    /** @brief Read a cache file from the specified location
     *
     * @fileName[in] The location the file is desired to be read from
     * @cache[in/out] That cache to be populated
     * @pathCache[in/out] Looking up SDFPath via string can be expensive so it
     *                    is worthwhile to cache this data across many repeated
     *                    calls.
     * @buffer[in/out] Buffer to use to read the cache file in to, passed to
     *                 allow reuse rather than allocate per call. Will be resized if not large enough.
     * @return Whether the read was successful
     */
    bool(CARB_ABI* readCacheFromDisk)(PathToAttributesMap& cache, const char* fileName, DeserializationCache& pathCache, std::vector<uint8_t>& buffer);

    /** @brief Enable/Disable change notifications on USD changes.
     *
     * @enable[in] True/False enable notifications
     */
    void(CARB_ABI* setEnableChangeNotifies)(bool enable);

    /** @brief Return whether change notifications on USD changes is enabled.
     *
     * @return True if change notifications on USD changes is enabled, else False.
     */
    bool(CARB_ABI* getEnableChangeNotifies)();

    /** @brief make buckets for all prims on a USD stage, but only if this
     * hasn't been done before.
     *
     * This is used to lazily create an index of all prims on a stage, without
     * the time or memory cost of fetching all the attribute values. The user
     * can then use findPrims to, for example, find all the prims of a
     * particular type.
     *
     * If a SimStageWithHistory hasn't been created for this stage then a
     * warning will be printed and no population will be done.
     *
     * @cache[in] The PathToAttributesMap to populate
     */
    void(CARB_ABI* minimalPopulateIfNecessary)(PathToAttributesMap& cache);
};
}
}
omniverse-code/kit/fabric/include/carb/flatcache/Allocator.h
#pragma once

#include <cmath>

#include <carb/logging/Log.h>
#include <carb/Defines.h>

#include <carb/flatcache/Defines.h>
#include <carb/flatcache/Intrinsics.h>

// All instrumentation below is compiled out unless DEVELOPMENT_BUILD is on:
// ALLOCATOR_HEADER prepends a per-allocation BlockHeader recording the size,
// which the STATS and LEAK_CHECK features both rely on.
#define ALLOCATOR_HEADER USE_IF( USING( DEVELOPMENT_BUILD ) )
#define ALLOCATOR_STATS USE_IF( USING( ALLOCATOR_HEADER ) ) // requires Header's byte tracking per-allocation
#define ALLOCATOR_LEAK_CHECK USE_IF( USING( ALLOCATOR_HEADER ) ) // requires Header's byte tracking per-allocation

namespace carb
{
namespace flatcache
{

// Format a byte count as a short human-readable string, e.g. "4 GB".
// WARNING: returns a pointer to a function-local static buffer -- the result
// is overwritten by the next call and is not thread-safe; do not store it.
inline const char* humanReadableSize( const uint64_t bytes ) noexcept
{
    // Tiny vsnprintf-style helper writing into a static scratch buffer.
    auto va = [](auto ...params) -> const char*
    {
        static char tmp[1024];
#ifdef _WIN32
        _snprintf_s(tmp, sizeof(tmp), params...);
#else
        snprintf(tmp, sizeof(tmp), params...);
#endif
        return (const char*)&tmp;
    };

    // Indexed by bit position of the highest set bit: 10 bits per magnitude.
    constexpr const char SIZE_UNITS[64][3]{
        " B", " B", " B", " B", " B", " B", " B", " B", " B", " B",
        "KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB", "KB",
        "MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB", "MB",
        "GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB", "GB",
        "TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB", "TB",
        "PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB", "PB",
        "EB", "EB", "EB", "EB"
    };
    static constexpr size_t B = 1ull;
    static constexpr size_t KB = 1024ull;
    static constexpr size_t MB = (1024ull*1024ull);
    static constexpr size_t GB = (1024ull*1024ull*1024ull);
    static constexpr size_t TB = (1024ull*1024ull*1024ull*1024ull);
    static constexpr size_t PB = (1024ull*1024ull*1024ull*1024ull*1024ull);
    static constexpr size_t EB = (1024ull*1024ull*1024ull*1024ull*1024ull*1024ull);
    // Divisor matching SIZE_UNITS entry by entry.
    constexpr const size_t SIZE_BASE[64]{
        B, B, B, B, B, B, B, B, B, B,
        KB, KB, KB, KB, KB, KB, KB, KB, KB, KB,
        MB, MB, MB, MB, MB, MB, MB, MB, MB, MB,
        GB, GB, GB, GB, GB, GB, GB, GB, GB, GB,
        TB, TB, TB, TB, TB, TB, TB, TB, TB, TB,
        PB, PB, PB, PB, PB, PB, PB, PB, PB, PB,
        EB, EB, EB, EB
    };
    // Index of the highest set bit (0 for bytes == 0); clz64 from Intrinsics.h.
    const uint32_t power = bytes ? ( 64u - clz64( bytes ) ) - 1u : 0;
    const char *const units = SIZE_UNITS[power];
    const size_t base = SIZE_BASE[power];
    const size_t count = bytes / base;
    return va("%zu %s", count, units);
}

// A wrapper around malloc/free that aims to:
//
// * Cheaply track allocation counts and bytes, and detect leaks automatically at ~Allocator()
//
// * Cheaply track usage in terms of peak memory usage, and total lifetime usage broken down by size. Sample output:
//   dumped to console appears like so:
//
//    == Allocator 0x000000E67BEFCEA0 Stats ==
//    allocCount:            0
//    allocBytes:            0  B
//    peakAllocCount:     4002
//    peakAllocBytes:        4 GB
//    minAllocBytes:       312  B
//    maxAllocBytes:         6 MB
//
//    Lifetime Allocation Histogram:
//      Normalized over TOTAL allocations: 13956
//      < 512  B|*****             29%         4002
//      <   1 KB|                   0%            0
//      <   2 KB|                   0%            0
//      <   4 KB|                   0%            0
//      <   8 KB|***               14%         2000
//      <  16 KB|                   0%            0
//      <  32 KB|***               14%         1994
//      <  64 KB|                   0%            0
//      < 128 KB|***               14%         1976
//      < 256 KB|                   0%            0
//      < 512 KB|***               14%         1904
//      <   1 MB|                   0%            0
//      <   2 MB|**                12%         1616
//      <   4 MB|                   0%            0
//      <   8 MB|*                  3%          464
//    ========================
struct Allocator
{
    Allocator();
    ~Allocator();

    // malloc-compatible allocation; adds a BlockHeader in instrumented builds.
    void* alloc(const size_t bytes);
    // Release a pointer previously returned by alloc()/new_().
    void free(void *const ptr);

    // Placement-construct a T in memory obtained from alloc().
    template<typename T, typename ...Params>
    T* new_(Params&& ...params);
    // Destroy and free a T created with new_().
    template<typename T>
    void delete_(T*const t);

    void resetStats() noexcept;
    void reportUsage() noexcept;
    bool checkLeaks() noexcept;

private:
#if USING( ALLOCATOR_HEADER )
    // Prefix stored immediately before every returned pointer.
    struct BlockHeader
    {
        size_t bytes;
    };
#endif // #if USING( ALLOCATOR_HEADER )

#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
    // Live allocation count / bytes (including headers).
    size_t allocCount;
    size_t allocBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
#if USING( ALLOCATOR_STATS )
    size_t peakAllocCount;
    size_t peakAllocBytes;
    size_t minAllocBytes;
    size_t maxAllocBytes;
    // One bucket per power-of-two size class (64 bits + 1).
    static constexpr size_t ALLOC_BUCKET_COUNT = 65;
    size_t lifetimeAllocCount;
    size_t lifetimeAllocBuckets[ALLOC_BUCKET_COUNT];
#endif // #if USING( ALLOCATOR_STATS )
};

// Callable adapters so the Allocator can be plugged in where an
// alloc(bytes) / free(ptr) function object is expected.
struct AllocFunctor
{
    Allocator *allocator;
    void* operator()(const size_t bytes)
    {
        CARB_ASSERT(allocator);
        return allocator->alloc(bytes);
    }
};
struct FreeFunctor
{
    Allocator *allocator;
    void operator()(void *const ptr)
    {
        CARB_ASSERT(allocator);
        return allocator->free(ptr);
    }
};

inline Allocator::Allocator()
{
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
    allocCount = 0;
    allocBytes = 0;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
    resetStats();
}

inline Allocator::~Allocator()
{
    // Leak check first so the report reflects the final (leaked) counters.
    checkLeaks();
    reportUsage();
}

inline void* Allocator::alloc(const size_t bytes)
{
#if USING( ALLOCATOR_HEADER )
    const size_t totalBytes = bytes + sizeof(BlockHeader);
#endif // #if USING( ALLOCATOR_HEADER )
    // NOTE: the stats/leak blocks below reference totalBytes, which only
    // exists when ALLOCATOR_HEADER is on; this compiles because STATS and
    // LEAK_CHECK are defined via USE_IF(USING(ALLOCATOR_HEADER)) above.
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
    // Overflow sanity checks on the counters.
    CARB_ASSERT((allocCount + 1) > allocCount);
    CARB_ASSERT((allocBytes + totalBytes) > allocBytes);
    ++allocCount;
    allocBytes += totalBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
#if USING( ALLOCATOR_STATS )
    if ( allocBytes > peakAllocBytes )
    {
        peakAllocBytes = allocBytes;
        peakAllocCount = allocCount;
    }
    if ( totalBytes < minAllocBytes )
    {
        minAllocBytes = totalBytes;
    }
    if ( totalBytes > maxAllocBytes )
    {
        maxAllocBytes = totalBytes;
    }
    // Bucket index = ceil(log2(totalBytes)), via clz64 from Intrinsics.h.
    const uint32_t bucket = ( 64u - clz64( totalBytes - 1ull ) );
    CARB_ASSERT(lifetimeAllocBuckets[bucket] + 1 > lifetimeAllocBuckets[bucket]);
    ++lifetimeAllocBuckets[bucket];
    ++lifetimeAllocCount;
#endif // #if USING( ALLOCATOR_STATS )
#if USING( ALLOCATOR_HEADER )
    BlockHeader *const header = (BlockHeader*)malloc(totalBytes);
    // NOTE(review): allocation failure is only caught by this assert; in a
    // release build a null header would be written through below.
    CARB_ASSERT(header);
    header->bytes = totalBytes;
    // The caller's pointer starts just past the header.
    return header+1;
#else // #if USING( ALLOCATOR_HEADER )
    return malloc(bytes);
#endif // #if USING( ALLOCATOR_HEADER )
}

inline void Allocator::free(void *const ptr)
{
#if USING( ALLOCATOR_HEADER )
    CARB_ASSERT(ptr);
    // Step back to the BlockHeader written by alloc().
    BlockHeader *header = (BlockHeader*)ptr;
    --header;
#if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
    const size_t totalBytes = header->bytes;
    // Underflow sanity checks on the counters.
    CARB_ASSERT((allocCount - 1) < allocCount);
    CARB_ASSERT((allocBytes - totalBytes) < allocBytes);
    --allocCount;
    allocBytes -= totalBytes;
#endif // #if USING( ALLOCATOR_STATS ) || USING( ALLOCATOR_LEAK_CHECK )
    ::free(header);
#else // #if USING( ALLOCATOR_HEADER )
    ::free(ptr);
#endif // #if USING( ALLOCATOR_HEADER )
}

template<typename T, typename ...Params>
inline T* Allocator::new_(Params&& ...params)
{
    T *const t = (T*)Allocator::alloc(sizeof(T));
    new (t) T(std::forward<Params>(params)...);
    return t;
}

template<typename T>
inline void Allocator::delete_(T*const t)
{
    CARB_ASSERT(t);
    t->~T();
#if USING( ALLOCATOR_HEADER )
    // Guards against deleting through the wrong type (or a pointer that did
    // not come from new_<T>): the recorded size must match exactly.
    BlockHeader *header = (BlockHeader*)t;
    header--;
    CARB_ASSERT(header->bytes == (sizeof(BlockHeader) + sizeof(T)));
#endif // #if USING( ALLOCATOR_HEADER )
    Allocator::free(t);
}

inline void Allocator::resetStats() noexcept
{
#if USING( ALLOCATOR_STATS )
    peakAllocCount = 0;
    peakAllocBytes = 0;
    minAllocBytes = SIZE_MAX;
    maxAllocBytes = 0;

    lifetimeAllocCount = 0;
    for ( size_t i = 0; i < ALLOC_BUCKET_COUNT; ++i )
    {
        lifetimeAllocBuckets[i] = 0;
    }
#endif // #if USING( ALLOCATOR_STATS )
}

inline void Allocator::reportUsage() noexcept
{
#if USING( ALLOCATOR_STATS )
    CARB_LOG_INFO("== Allocator 0x%p Stats ==", this);
    if (!lifetimeAllocCount)
    {
        CARB_LOG_INFO("<no stats to report; unused allocator>");
        CARB_LOG_INFO("========================");
        return;
    }
    CARB_LOG_INFO("allocCount:     %12zu", allocCount);
    CARB_LOG_INFO("allocBytes:     %15s", humanReadableSize(allocBytes));
    CARB_LOG_INFO("peakAllocCount: %12zu", peakAllocCount);
    CARB_LOG_INFO("peakAllocBytes: %15s", humanReadableSize(peakAllocBytes));
    CARB_LOG_INFO("minAllocBytes:  %15s", humanReadableSize(minAllocBytes));
    CARB_LOG_INFO("maxAllocBytes:  %15s", humanReadableSize(maxAllocBytes));
    CARB_LOG_INFO("");
    CARB_LOG_INFO("Lifetime Allocation Histogram:");
    // Trim leading and trailing empty buckets from the printed range.
    size_t begin = 0;
    for ( ; begin < ALLOC_BUCKET_COUNT; ++begin )
    {
        if ( lifetimeAllocBuckets[begin] )
        {
            break;
        }
    }
    size_t end = 0;
    for ( ; end < ALLOC_BUCKET_COUNT; ++end )
    {
        if ( lifetimeAllocBuckets[ALLOC_BUCKET_COUNT - end - 1] )
        {
            end = ALLOC_BUCKET_COUNT - end;
            break;
        }
    }
    CARB_LOG_INFO("  Normalized over TOTAL allocations: %zu", lifetimeAllocCount);
    size_t i;
    float normalized[ALLOC_BUCKET_COUNT];
    for ( i = begin; i < end; ++i )
    {
        normalized[i] = (float)lifetimeAllocBuckets[i] / (float)lifetimeAllocCount;
    }
    constexpr size_t WIDTH = 16;
    for ( i = begin; i < end; ++i )
    {
        // Render a proportional bar of '*' characters for this bucket.
        char buf[WIDTH+1] = {};
        const size_t w = ( size_t )std::ceil(WIDTH * normalized[i]);
        for( size_t j = 0; j < w; ++j)
        {
            buf[j] = '*';
        }
        static_assert(WIDTH == 16, "Fix CARB_LOG_INFO below");
        CARB_LOG_INFO("  <%7s|%-16s %3.0f%% %12zu", humanReadableSize(1ull<<i), buf, (normalized[i] * 100.f), lifetimeAllocBuckets[i]);
    }
    CARB_LOG_INFO("========================");
#endif // #if USING( ALLOCATOR_STATS )
}

inline bool Allocator::checkLeaks() noexcept
{
#if USING( ALLOCATOR_LEAK_CHECK )
    if (allocCount || allocBytes)
    {
        CARB_LOG_ERROR("PathToAttributesMap detected a memory leak of %s!\n", humanReadableSize(allocBytes));
        CARB_ASSERT(false, "PathToAttributesMap detected a memory leak of %s!\n", humanReadableSize(allocBytes));
        return true;
    }
#endif // #if USING( ALLOCATOR_LEAK_CHECK )
    return false;
}

} // namespace flatcache
} // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/InterpolationUsd.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// This include must come first
// clang-format off
#include "UsdPCH.h"
// clang-format on

#include <pxr/base/gf/matrix4d.h>
#include <pxr/base/gf/quatf.h>

#include "carb/logging/Log.h"

/**
 * @brief Defined in a separate location to the other lerp functions
 * in order to avoid breaking C-ABI compatibility
 */
namespace carb
{
namespace flatcache
{

/**
 * @brief Spherical interpolation specialization relying on pxr native
 * interpolation for quaternions
 *
 * @param q0 Start quaternion (result at theta == 0).
 * @param q1 End quaternion (result at theta == 1).
 * @param theta Interpolation parameter. Values outside [0, 1] only trigger a
 *              one-time warning; the slerp is still evaluated.
 */
template <>
inline const pxr::GfQuatf interpolate(const pxr::GfQuatf& q0, const pxr::GfQuatf& q1, float theta)
{
    if (theta < 0.0f || theta > 1.0f)
    {
        CARB_LOG_WARN_ONCE("InterpolationUsd interpolate(): theta %f outside range [0.0, 1.0]", theta);
    }
    pxr::GfQuatf result = pxr::GfSlerp(theta, q0, q1);
    return result;
}

/**
 * @brief pxr::Matrix4d interpolation specialization Used in Kit by OmniHydraDelegate
 *
 * Decomposes both matrices into rotation / scale / translation, interpolates
 * each component separately (slerp for rotation, lerp for scale and
 * translation), then recomposes a matrix from the results.
 */
template <>
inline const pxr::GfMatrix4d interpolate(const pxr::GfMatrix4d& m0, const pxr::GfMatrix4d& m1, float theta)
{
    if (theta < 0.0f || theta > 1.0f)
    {
        CARB_LOG_WARN_ONCE("InterpolationUsd interpolate(): theta %f outside range [0.0, 1.0]", theta);
    }

    pxr::GfMatrix4d r0, r1; // rotations, where -r is inverse of r
    pxr::GfVec3d s0, s1; // scale
    pxr::GfMatrix4d u0, u1; // rotations, may contain shear info
    pxr::GfVec3d t0, t1; // translations
    pxr::GfMatrix4d p0, p1; // p is never modified; can contain projection info

    // Account for rotation, translation, scale
    // (order is mat = r * s * -r * u * t), eps=1e-10 used to avoid zero values
    m0.Factor(&r0, &s0, &u0, &t0, &p0);
    m1.Factor(&r1, &s1, &u1, &t1, &p1);

    // Interpolate component-wise
    pxr::GfVec3d tResult = pxr::GfLerp(theta, t0, t1);
    pxr::GfVec3d sResult = pxr::GfLerp(theta, s0, s1);
    pxr::GfQuatd rResult = pxr::GfSlerp(u0.ExtractRotationQuat(), u1.ExtractRotationQuat(), theta);

    // NOTE(review): the second constructor argument (the translation) is
    // GfCompMult(sResult, tResult) -- the interpolated scale is folded into
    // the interpolated translation component-wise, and sResult is not applied
    // as a scale elsewhere. Confirm this is intentional; it looks like scale
    // may be lost (or translation distorted) for non-unit scales. Also note
    // r/p (stretch and projection factors) from Factor() are discarded.
    pxr::GfMatrix4d result = pxr::GfMatrix4d(pxr::GfRotation(rResult), pxr::GfCompMult(sResult, tResult));

    return result;
}
}
}
omniverse-code/kit/fabric/include/carb/flatcache/RationalTime.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <map>
#include <stdint.h>

namespace carb
{
namespace flatcache
{

// Each frame in the history buffer is timestamped with a frame time, stored as
// a rational number (numerator / denominator) to minimize rounding issues.
// See threadgate::TimeRatio. {0, 0} (kInvalidTime) is the invalid sentinel.
struct RationalTime
{
    int64_t numerator;
    uint64_t denominator;

    // Return an equivalent time with the smallest possible denominator, by
    // dividing both parts by gcd(numerator, denominator).
    // The invalid time {0, 0} reduces to {0, 0}.
    RationalTime reduce() const
    {
        RationalTime result{ 0, 0 };
        // NOTE: denominator is narrowed to int64_t here; denominators above
        // INT64_MAX are not supported (unchanged from the original code).
        int64_t gcdNumDen = gcd(numerator, denominator);
        if (gcdNumDen != 0)
        {
            result.numerator = numerator / gcdNumDen;
            result.denominator = denominator / gcdNumDen;
        }
        return result;
    }

    // Equality compares reduced forms, so {1, 2} == {2, 4}.
    bool operator==(RationalTime rhs) const
    {
        RationalTime thisReduced = reduce();
        RationalTime rhsReduced = rhs.reduce();
        return (thisReduced.numerator == rhsReduced.numerator) && (thisReduced.denominator == rhsReduced.denominator);
    }

    bool operator!=(RationalTime rhs) const
    {
        return !(*this == rhs);
    }

    // Greatest common divisor (Euclid's algorithm); result is non-negative.
    // Returns 0 only when both inputs are 0.
    static int64_t gcd(int64_t a, int64_t b)
    {
        while (b != 0)
        {
            int64_t t = b;
            b = a % b;
            a = t;
        }
        // Fix: the previous implementation used std::max(a, -a) without
        // including <algorithm>; it only compiled via a transitive include.
        // Take the absolute value directly instead.
        // (a == INT64_MIN would overflow on negation -- same as before.)
        return a < 0 ? -a : a;
    }

    // Difference of two times, returned in reduced form.
    RationalTime operator-(RationalTime b) const
    {
        RationalTime result;
        result.numerator = numerator * int64_t(b.denominator) - b.numerator * int64_t(denominator);
        result.denominator = denominator * b.denominator;
        return result.reduce();
    }

    // Scale a time by an integer factor, returned in reduced form.
    RationalTime operator*(int64_t b) const
    {
        RationalTime result;
        result.numerator = numerator * b;
        result.denominator = denominator;
        return result.reduce();
    }
};

static const RationalTime kInvalidTime = { 0, 0 };

} // namespace flatcache
} // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/ApiLogger.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/flatcache/IPath.h> #include <carb/flatcache/IToken.h> #include <iostream> // To log all FlatCache methods that access a particular path and attribute, // set the following three defines #define ENABLE_FLATCACHE_API_LOG 0 #if ENABLE_FLATCACHE_API_LOG #define attrToTrace "attrToLog" #define pathToTrace "/primToLog" namespace carb { namespace flatcache { struct ApiLogger { bool& enabled; const char* desc; ApiLogger(const char* desc, bool& enabled, const TokenC& attrNameC) : desc(desc), enabled(enabled) { Token attrName(attrNameC); if (attrName == Token(attrToTrace)) { std::cout << "begin " << desc << "\n"; enabled = true; } } ApiLogger(const char* desc, bool& enabled, const PathC& pathC, const TokenC& attrNameC) : desc(desc), enabled(enabled) { Path path(pathC); Token attrName(attrNameC); if (path == Path(pathToTrace) && attrName == Token(attrToTrace)) { std::cout << "begin " << desc << "\n"; enabled = true; } } ~ApiLogger() { if (enabled) { std::cout << "end " << desc << "\n"; } enabled = false; } }; #define APILOGGER(...) ApiLogger logger(__VA_ARGS__) } } #else #define APILOGGER(...) #endif
omniverse-code/kit/fabric/include/carb/flatcache/underlying.h
#pragma once

#include <type_traits>

namespace carb
{
namespace flatcache
{

// Convert an enum (scoped or unscoped) to its underlying integer type.
// Equivalent to C++23's std::to_underlying.
template <typename EnumT>
constexpr inline typename std::underlying_type<EnumT>::type underlying(const EnumT& value)
{
    using Underlying = typename std::underlying_type<EnumT>::type;
    return static_cast<Underlying>(value);
}

} // namespace flatcache
} // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/Ordered_Set.h
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator> // std::back_inserter (previously relied on a transitive include)
#include <vector>

namespace carb
{
namespace flatcache
{

// A sorted-vector based set: elements are kept sorted (by Compare) and
// unique in a contiguous std::vector, trading O(n) insert/erase for cache
// friendly iteration and O(log n) lookup. Invariant: v is sorted by cmp and
// contains no two equivalent elements.
template <class T, class Compare = std::less<T>>
struct set
{
    using value_type = T;

    std::vector<T> v;
    Compare cmp;

    using iterator = typename std::vector<T>::iterator;
    using const_iterator = typename std::vector<T>::const_iterator;

    iterator begin()
    {
        return v.begin();
    }
    iterator end()
    {
        return v.end();
    }
    const_iterator begin() const
    {
        return v.begin();
    }
    const_iterator end() const
    {
        return v.end();
    }

    set(const Compare& c = Compare()) : v(), cmp(c)
    {
    }

    template <class InputIterator>
    set(InputIterator first, InputIterator last, const Compare& c = Compare()) : v(first, last), cmp(c)
    {
        std::sort(begin(), end(), cmp);
        // Fix: drop duplicates so range / initializer-list construction
        // upholds the sorted+unique invariant that insert()/find() assume.
        // Previously set{1, 2, 2} kept both 2s. Two elements are considered
        // equivalent when neither orders before the other under cmp.
        v.erase(std::unique(begin(), end(),
                            [this](const T& a, const T& b) { return !cmp(a, b) && !cmp(b, a); }),
                end());
    }

    set(std::initializer_list<T> _Ilist) : set(_Ilist.begin(), _Ilist.end())
    {
    }

    void reserve(size_t newCapacity)
    {
        v.reserve(newCapacity);
    }

    void clear()
    {
        v.clear();
    }

    // Insert t if no equivalent element is present; returns an iterator to
    // the (existing or newly inserted) element.
    iterator insert(const T& t)
    {
        iterator i = std::lower_bound(begin(), end(), t, cmp);
        if (i == end() || cmp(t, *i))
            i = v.insert(i, t);
        return i;
    }

    iterator insert(T&& t)
    {
        iterator i = std::lower_bound(begin(), end(), t, cmp);
        if (i == end() || cmp(t, *i))
            i = v.insert(i, std::move(t));
        return i;
    }

    template <class _Iter>
    void insert(_Iter _First, _Iter _Last)
    {
        // insert [_First, _Last) one at a time
        for (; _First != _Last; ++_First)
        {
            insert(*_First);
        }
    }

    iterator insert(const_iterator hint, const value_type& value)
    {
        // Measurements show it is faster to ignore hint in this application
        return insert(value);
    }

    void insert(std::initializer_list<T> _Ilist)
    {
        insert(_Ilist.begin(), _Ilist.end());
    }

    // Erase the element equivalent to key, if any; returns the number of
    // elements removed (0 or 1), matching std::set::erase.
    size_t erase(const T& key)
    {
        iterator removeElement = find(key);
        if (removeElement != v.end())
        {
            v.erase(removeElement);
            return 1;
        }
        else
        {
            return 0;
        }
    }

    iterator erase(iterator iter)
    {
        return v.erase(iter);
    }

    // Binary search; returns end() when no equivalent element exists.
    const_iterator find(const T& t) const
    {
        const_iterator i = std::lower_bound(begin(), end(), t, cmp);
        return i == end() || cmp(t, *i) ? end() : i;
    }

    iterator find(const T& t)
    {
        iterator i = std::lower_bound(begin(), end(), t, cmp);
        return i == end() || cmp(t, *i) ? end() : i;
    }

    bool contains(const T& t) const
    {
        const_iterator i = std::lower_bound(begin(), end(), t, cmp);
        return i != end() && !cmp(t, *i);
    }

    bool operator==(const set<T>& other) const
    {
        return v == other.v;
    }

    bool operator!=(const set<T>& other) const
    {
        return v != other.v;
    }

    size_t size() const
    {
        return v.size();
    }

    T* data()
    {
        return v.data();
    }

    const T* data() const
    {
        return v.data();
    }
};

template <class T, class Compare = std::less<T>>
bool operator<(const set<T, Compare>& left, const set<T, Compare>& right)
{
    return left.v < right.v;
}

// N-way merge of sorted sets into their union (duplicates across inputs are
// emitted once). NOTE: the merge compares elements with T's operator< / ==
// rather than each set's Compare, as before.
template<typename T>
flatcache::set<T> nWayUnion(std::vector<flatcache::set<T>>& srcBuckets)
{
    flatcache::set<T> retval;

    // Calculate the maximum number of destination attributes
    // We could instead calculate it exactly by finding union of attribute names
    size_t maxDestAttrCount = 0;
    for (flatcache::set<T>& srcBucket : srcBuckets)
    {
        maxDestAttrCount += srcBucket.size();
    }
    retval.reserve(maxDestAttrCount);
    auto currentDest = std::back_inserter(retval.v);

    size_t bucketCount = srcBuckets.size();

    // Initialize invariant that nonEmpty is the vector of buckets that have
    // non-zero attribute counts
    struct NonEmptySegment
    {
        // Invariant is current!=end
        typename std::vector<T>::iterator current;
        typename std::vector<T>::iterator end;
    };
    std::vector<NonEmptySegment> nonEmpty;
    nonEmpty.reserve(bucketCount);
    for (size_t i = 0; i != bucketCount; i++)
    {
        if (srcBuckets[i].begin() != srcBuckets[i].end())
        {
            nonEmpty.push_back({ srcBuckets[i].begin(), srcBuckets[i].end() });
        }
    }

    // Keep going until there's only 1 non-empty bucket
    // At that point we can just copy its attributes to the output
    while (1 < nonEmpty.size())
    {
        // Find all the buckets that have the minimum element
        // These are the ones whose iterators will get advanced
        // By the loop guard and the invariant, we know that nonEmpty[0] exists
        // and that nonEmpty[0].current!=nonEmpty[0].end.
        // So *nonEmpty[0].current is a safe dereference
        T minSoFar = *nonEmpty[0].current;
        std::vector<size_t> indicesAtMin;
        indicesAtMin.reserve(nonEmpty.size());
        indicesAtMin.push_back(0);
        for (size_t i = 1; i != nonEmpty.size(); i++)
        {
            if (*nonEmpty[i].current < minSoFar)
            {
                minSoFar = *nonEmpty[i].current;
                indicesAtMin = { i };
            }
            else if (*nonEmpty[i].current == minSoFar)
            {
                indicesAtMin.push_back(i);
            }
        }

        // Copy minimum element to the output
        *currentDest++ = minSoFar;

        // Advance the iterators that pointed to the min
        // (removed a dead, never-used temporary vector that the previous
        // version reserved here)
        for (size_t i = 0; i != indicesAtMin.size(); i++)
        {
            nonEmpty[indicesAtMin[i]].current++;
        }

        // Maintain the invariant that nonEmpty are the non empty ones.
        // remove_if keeps the relative order of surviving cursors and
        // compacts in a single pass (the previous erase-in-a-loop was
        // quadratic in the worst case).
        nonEmpty.erase(std::remove_if(nonEmpty.begin(), nonEmpty.end(),
                                      [](const NonEmptySegment& s) { return s.current == s.end; }),
                       nonEmpty.end());
    }

    // By the negation of the guard we know that nonEmpty has zero or one elements
    if (nonEmpty.size() == 1)
    {
        // If one bucket is left, copy its elements to the output
        std::copy(nonEmpty[0].current, nonEmpty[0].end, currentDest);
    }

    return retval;
}
}
}
omniverse-code/kit/fabric/include/carb/flatcache/StageWithHistory.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "IFlatcache.h" #include <carb/Framework.h> #include <carb/Interface.h> #include <carb/flatcache/AttrNameAndType.h> #include <carb/flatcache/IPath.h> #include <carb/flatcache/IToken.h> #include <carb/flatcache/PrimChanges.h> #include <carb/logging/Log.h> #include <gsl/span> #include <carb/flatcache/Type.h> #include <map> #include <stdint.h> #include "carb/cpp17/Optional.h" using carb::cpp17::optional; namespace carb { namespace flatcache { // The comments in this file are intended to be read top to bottom, like a book. // This file defines flatcache::StageWithHistory, an instance of which stores // the current stage and a configurable number of frames of state history. It // is thread safe, in the sense that the current state can be safely read and // written in parallel with multiple threads reading the history. The class // definition is towards the end of this file. We first define some basic types, // a read/write accessor class for use by the main/game/sim thread, and two // read-only accessor classes for use on render threads. These classes provide // access to an interpolated state and a window of state history respectively. // To specify paths, attribute names, and attribute types we use // flatcache::Path, flatcache::Token and graph::Type types, rather than // USD's SdfPath, TfToken and TfType. This allows us to access the stage and // history without including USD headers. // The main class is this file is StageWithHistory, which is defined towards // the end of the file. 
// Forward declarations.
class StageWithHistory;
template<typename T>
class AttributeArrayResult;

/**
 * Type-erased, read-only view of an array attribute's bytes.
 *
 * @invariant arrayBytes.size() must be a multiple of bytesPerElement
 */
class ConstArrayAsBytes
{
public:
    gsl::span<const gsl::byte> arrayBytes; // raw bytes of the whole array
    size_t bytesPerElement;                // stride of one element in arrayBytes
    Type elementType;                      // type descriptor of one element
};

// findPrims() returns a list of buckets of prims, represented by PrimBucketList.
class PrimBucketList
{
    friend class StageAtTimeInterval;
    friend class StageInProgress;

protected:
    PrimBucketListId m_primBucketListId; // opaque handle into the C-ABI layer
    static carb::flatcache::IPrimBucketList* sIPrimBucketList();

    PrimBucketList(PrimBucketListId id) : m_primBucketListId(id)
    {
    }

public:
    // PrimBucketList is opaque, you have to use the getAttributesArray methods
    // of StageInProgress, StageAtTime or StageAtTimeInterval to read the
    // attributes of its elements.
    size_t bucketCount() const;
    size_t size() const;
    void print() const;
    PrimBucketListId getId() const
    {
        return m_primBucketListId;
    }
    ~PrimBucketList();
};

// ChangedPrimBucketList is a PrimBucketList that has changes stored for a
// particular listener. It is returned by StageInProgress::getChanges().
class ChangedPrimBucketList : public PrimBucketList
{
    ChangedPrimBucketList(PrimBucketListId id) : PrimBucketList(id) {}
    friend class StageInProgress;

public:
    BucketChanges getChanges(size_t index);
    AddedPrimIndices getAddedPrims(size_t index);
};

// The main/game/sim thread uses the following class to read and write the
// state at the current frame.
//
// StageInProgress can either be used RAII style, you construct it from a frameNumber,
// or non-RAII style, where you construct it from an existing stageInProgressId.
class StageInProgress
{
    StageInProgressId m_stageInProgress; // opaque handle into the C-ABI layer
    bool m_createdFromId;                // true if built from an existing StageInProgressId
    UsdStageId m_usdStageId;             // Only valid if m_createdFromId == false

public:
    // The constructor creates a new frame and locks it for read/write
    StageInProgress(StageWithHistory& stageWithHistory, size_t simFrameNumber);

    // Create from an already locked frame
    StageInProgress(StageInProgressId stageInProgressId);

    // Returns the frame number allocated by constructor
    size_t getFrameNumber();

    // Returns the frame time allocated by constructor
    RationalTime getFrameTime();

    // Returns which mirrored array is valid: CPU, GPU, etc.
    ValidMirrors getAttributeValidBits(const Path& path, const Token& attrName) const;

    // getAttribute returns a read/write pointer to a non-array attribute
    // If it returns nullptr then the attribute doesn't exist in the stage
    template <typename T>
    T* getAttribute(const Path& path, const Token& attrName);

    // getAttribute returns a read-only pointer to a non-array attribute
    // If it returns nullptr then the attribute doesn't exist in the stage
    template <typename T>
    const T* getAttributeRd(const Path& path, const Token& attrName);

    // getAttribute returns a write-only pointer to a non-array attribute
    // If it returns nullptr then the attribute doesn't exist in the stage
    template <typename T>
    T* getAttributeWr(const Path& path, const Token& attrName);

    // getAttribute returns a read/write pointer to a non-array attribute
    // If it returns nullptr then the attribute doesn't exist in the stage
    template <typename T>
    T* getAttributeGpu(const Path& path, const Token& attrName);

    // getAttribute returns a read-only pointer to a non-array attribute
    // If it returns nullptr then the attribute doesn't exist in the stage
    template <typename T>
    const T* getAttributeRdGpu(const Path& path, const Token& attrName);

    // getAttribute returns a write-only pointer to a non-array attribute
    // If it returns nullptr then the attribute doesn't exist in the stage
    template <typename T>
    T* getAttributeWrGpu(const Path& path, const Token& attrName);

    // getOrCreateAttributeWr returns a write-only pointer to a non-array
    // attribute. If the attribute doesn't exist, then it will create it.
    // The return type is a reference rather than a pointer because the
    // attribute is guaranteed to exist on exit
    template <typename T>
    T& getOrCreateAttributeWr(const Path& path, const Token& attrName, Type type);

    // getAttribute returns a read/write span of an array attribute
    // The span allows the array size to be read, but not written
    // To set the array size, use setArrayAttributeSize
    template <typename T>
    gsl::span<T> getArrayAttribute(const Path& path, const Token& attrName);

    // getAttributeRd returns a read-only span of an array attribute
    // The array size is also read only
    template <typename T>
    gsl::span<const T> getArrayAttributeRd(const Path& path, const Token& attrName);

    // getAttributeRd returns a write-only span of an array attribute
    // The array size is read only, to resize use setArrayAttributeSize
    template <typename T>
    gsl::span<T> getArrayAttributeWr(const Path& path, const Token& attrName);

    // Get the size of an array attribute. When writing CPU code, it isn't
    // normally necessary to use this method, as getArrayAttribute returns a
    // span containing the data pointer and the size.
    // However, when writing mixed CPU/GPU code it is wasteful to copy the
    // array data from GPU to CPU when just the size is required, so use this
    // method in that case.
    size_t getArrayAttributeSize(const Path& path, const Token& attrName);

    // Set the size of an array attribute
    void setArrayAttributeSize(const Path& path, const Token& attrName, size_t elemCount);

    // Resize one prim's array attribute and return a write span over the
    // resized storage in a single call.
    template <typename T>
    gsl::span<T> setArrayAttributeSizeAndGet(const PrimBucketList& primBucketList,
                                             size_t primBucketListIndex,
                                             size_t indexInBucket,
                                             const Token& attrName,
                                             size_t newElemCount);

    // createPrim, destroyPrim, createAttribute and destroyAttribute do what
    // you'd expect
    void createPrim(const Path& path);
    void destroyPrim(const Path& path);
    void createAttribute(const Path& path, const Token& attrName, Type type);
    template<int n>
    void createAttributes(const Path& path, std::array<AttrNameAndType, n> attributes);

    // Deprecated: type argument is not used.
    void destroyAttribute(const Path& path, const Token& attrName, Type type);
    void destroyAttribute(const Path& path, const Token& attrName);
    template <int n>
    void destroyAttributes(const Path& path, const std::array<Token, n>& attributes);
    void destroyAttributes(const Path& path, const std::vector<Token>& attributes);

    // findPrims() finds prims that have all the attributes in "all", and any
    // of the attributes in "any", and none of the attributes in "none".
    // The attributes of the resulting prims can be accessed as piecewise
    // contiguous arrays, using getAttributeArray() below, which is typically
    // faster than calling getAttribute for each prim.
    PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
                             const carb::flatcache::set<AttrNameAndType>& any = {},
                             const carb::flatcache::set<AttrNameAndType>& none = {});

    /**
     * Tell a listener to log changes for an attribute.
     * Attaches listener to stage if not already attached
     *
     * @param[in] attrName The attribute's name
     * @param[in] listenerId The listener
     */
    void attributeEnableChangeTracking(const Token& attrName, ListenerId listenerId);

    /**
     * Tell a listener to stop logging changes for an attribute.
     * Attaches listener to stage if not already attached
     *
     * @param[in] attrName The attribute's name
     * @param[in] listenerId The listener
     */
    void attributeDisableChangeTracking(const Token& attrName, ListenerId listenerId);

    /**
     * Tell a listener to log prim creates
     * Attaches listener to stage if not already attached
     *
     * @param[in] listenerId The listener
     */
    void enablePrimCreateTracking(ListenerId listenerId);

    /**
     * Pause change tracking.
     *
     * @param[in] listenerId The listener to pause
     */
    void pauseChangeTracking(ListenerId listenerId);

    /**
     * Resume change tracking.
     *
     * @param[in] listenerId The listener to resume
     */
    void resumeChangeTracking(ListenerId listenerId);

    /**
     * Is change tracking paused?
     *
     * @param[in] listenerId The listener
     * @return Whether the listener is paused
     */
    bool isChangeTrackingPaused(ListenerId listenerId);

    /**
     * Get changes
     *
     * @param[in] listenerId The listener
     * @return The changes that occurred since the last time the listener was popped
     */
    ChangedPrimBucketList getChanges(ListenerId listenerId);

    /**
     * Clear the list of changes
     *
     * @param[in] listenerId The listener
     */
    void popChanges(ListenerId listenerId);

    /**
     * Get the number of listeners
     *
     * @return The number of listeners listening to this stage
     */
    size_t getListenerCount();

    /**
     * Is the listener attached to this stage
     *
     * @param[in] listenerId The listener
     * @return Whether the listener is attached to this stage
     */
    bool isListenerAttached(ListenerId listenerId);

    /**
     * Detach the listener from the stage. Future changes will not be logged for this listener.
     *
     * @param[in] listenerId The listener
     */
    void detachListener(ListenerId listenerId);

    // getAttributeArray(primBucketList, index, attrName) returns a read/write
    // contiguous array of the values of attribute "attrName" for each prim of
    // bucket "index" of "primBucketList".
    // "index" must be in the range [0..primBucketList.getBucketCount())
    template <typename T>
    gsl::span<T> getAttributeArray(const PrimBucketList& primBucketList,
                                   size_t primBucketListIndex,
                                   const Token& attrName);

    template <typename T>
    gsl::span<const T> getAttributeArrayRd(const PrimBucketList& primBucketList,
                                           size_t primBucketListIndex,
                                           const Token& attrName) const;

    template <typename T>
    gsl::span<T> getAttributeArrayWr(const PrimBucketList& primBucketList,
                                     size_t primBucketListIndex,
                                     const Token& attrName);

    template <typename T>
    gsl::span<T> getAttributeArrayGpu(const PrimBucketList& primBucketList,
                                      size_t primBucketListIndex,
                                      const Token& attrName);

    template <typename T>
    gsl::span<const T> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
                                              size_t primBucketListIndex,
                                              const Token& attrName) const;

    template <typename T>
    gsl::span<T> getAttributeArrayWrGpu(const PrimBucketList& primBucketList,
                                        size_t primBucketListIndex,
                                        const Token& attrName);

    // Like getAttributeArrayWr, but creates the attribute if absent.
    template <typename T>
    gsl::span<T> getOrCreateAttributeArrayWr(const PrimBucketList& primBucketList,
                                             size_t primBucketListIndex,
                                             const Token& attrName,
                                             Type type);

    // getAttributeArray(primBucketList, index, attrName) returns a vector of
    // array-valued attributes "attrName" for the prims of bucket "index" of
    // "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
    // It gives read/write access to the values of each prim's array
    template <typename T>
    std::vector<gsl::span<T>> getArrayAttributeArray(const PrimBucketList& primBucketList,
                                                     size_t primBucketListIndex,
                                                     const Token& attrName) const;

    // getAttributeArray(primBucketList, index, attrName) returns a vector of
    // array-valued attributes "attrName" for the prims of bucket "index" of
    // "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
    // It gives read-only access to the values of each prim's array
    template <typename T>
    std::vector<gsl::span<const T>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
                                                             size_t primBucketListIndex,
                                                             const Token& attrName) const;

    // getAttributeArray(primBucketList, index, attrName) returns a vector of
    // array-valued attributes "attrName" for the prims of bucket "index" of
    // "primBucketList". "index" must be in the range [0..primBucketList.getBucketCount())
    // It gives write-only access to the values of each prim's array
    template <typename T>
    std::vector<gsl::span<T>> getArrayAttributeArrayWr(const PrimBucketList& primBucketList,
                                                       size_t primBucketListIndex,
                                                       const Token& attrName) const;

    // getPathArray(primBucketList, index) returns a read-only contiguous array
    // of the paths of the prims of bucket "index" of "primBucketList".
    // "index" must be in the range [0..primBucketList.getBucketCount())
    gsl::span<const Path> getPathArray(const PrimBucketList& primBucketList,
                                       size_t primBucketListIndex) const;

    // Names and types of all attributes present on bucket "index".
    flatcache::set<AttrNameAndType> getAttributeNamesAndTypes(const PrimBucketList& primBucketList,
                                                              size_t primBucketListIndex) const;

    // TODO: replace with an iterator for iterating over bucket names
    void printBucketNames() const;

    // Record that the attribute at path.attrName has been modified. Right now this is
    // done explicitly to give a high degree of control over which attributes get
    // passed to the notice.
    void logAttributeWriteForNotice(const Path& path, const Token& attrName);

    // Construct and send a TfNotice with a vector of objects paths
    // that have changed, much like the ObjectsChanged notice from USD
    void broadcastTfNoticeForAttributesChanged() const;

    // Connection API

    /**
     * @brief Create a connection on the target prim
     *
     * @param path the target prim on which to create a connection
     * @param connectionName specifies the connections attribute name on the prim
     * @param connection specifies the target prim and attribute of the connection
     */
    void createConnection(const Path& path, const Token& connectionName, const Connection& connection);

    /**
     * @brief Create an arbitrary number of connections on the target prim
     *
     * @param path the target prim on which to create a connection
     * @param connectionNames a span of attribute names. Must match the size of the connections vector
     * @param connections a span of connections. Must match the size of the connectionNames vector
     */
    void createConnections(const Path& path,
                           const gsl::span<Token>& connectionNames,
                           const gsl::span<Connection>& connections);

    /**
     * @brief removes a connection from a prim
     *
     * @param path the target prim from which to remove a connection
     * @param connectionName the name of the connection to remove
     */
    void destroyConnection(const Path& path, const Token& connectionName);

    /**
     * @brief removes an arbitrary number of connections from a prim
     *
     * @param path the target prim from which to remove the connections
     * @param connectionNames the names of the connections to be removed
     */
    void destroyConnections(const Path& path, const gsl::span<Token>& connectionNames);

    /**
     * @brief Get a R/W pointer to a connection on the target prim
     *
     * @param path the target prim
     * @param connectionName the target connection name
     * @return a R/W pointer to the connection
     */
    Connection* getConnection(const Path& path, const Token& connectionName);

    /**
     * @brief Get a read only pointer to a connection on the target prim
     *
     * @param path the target prim
     * @param connectionName the target connection name
     * @return a read only pointer to the connection
     */
    const Connection* getConnectionRd(const Path& path, const Token& connectionName);

    /**
     * @brief Get a write only pointer to a connection on the target prim
     *
     * @param path the target prim
     * @param connectionName the target connection name
     * @return a write only pointer to the connection
     */
    Connection* getConnectionWr(const Path& path, const Token& connectionName);

    /**
     * @brief Copy all attributes from the source prim to the destination prim
     * Will create attributes if they do not exist on the destination prim
     * If an attribute exists on both prims they must have compatible types to copy.
     *
     * @param[in] srcPath the source prim
     * @param[in] dstPath the destination prim
     */
    void copyAttributes(const Path& srcPath, const Path& dstPath);

    /**
     * @brief Copy the specified attributes from the source prim to the destination prim
     * Will create attributes if they do not exist on the destination prim
     * If an attribute exists on both prims they must have compatible types to copy.
     *
     * @param[in] srcPath the source prim
     * @param[in] srcAttrs a span of attributes to be copied.
     * @param[in] dstPath the destination prim
     */
    void copyAttributes(const Path& srcPath, const gsl::span<Token>& srcAttrs, const Path& dstPath);

    /**
     * @brief Copy the specified attributes from the source prim to the the specified
     * attributes on the destination prim
     * Will create attributes if they do not exist on the destination prim
     * If an attribute exists on both prims they must have compatible types to copy.
     * Note: The srcAttrs and dstAttrs must be the same size as the function assumes
     * that the copy is 1 to 1 in terms of name alignment
     *
     * @param[in] srcPath the source prim
     * @param[in] srcAttrs a span of attributes to be copied.
     * @param[in] dstPath the destination prim
     * @param[in] dstAttrs a span of attributes to be copied.
     */
    void copyAttributes(const Path& srcPath,
                        const gsl::span<Token>& srcAttrs,
                        const Path& dstPath,
                        const gsl::span<Token>& dstAttrs);

    StageInProgressId getId() const
    {
        return m_stageInProgress;
    }

    /**
     * @brief Check whether a prim exists at a given path
     * @param[in] path the path
     * @return true if a prim exists at the path
     */
    bool primExists(const Path& path);

    // If StageInProgress was created from an Id, then do nothing
    // Else unlock the current sim frame, allowing it to be read by
    // other threads
    ~StageInProgress();
};

// The following two classes, StageAtTime and StageAtTimeInterval
// are used by reader threads to read the history. StageAtTime is
// used when the state of a stage is needed at a particular point in time.
// StageAtTimeInterval is used when we need all the stage history in a given time
// window.
//
// There can be multiple threads reading the history buffer, for example
// multiple sensor renderers running at different rates. We use shared locks
// to allow multiple threads to read the same frame of history.
//
// StageAtTimeInterval takes an RAII approach to locking, constructing one locks
// a range of slots for reading, and destructing unlocks them.
class StageAtTimeInterval
{
    StageAtTimeIntervalId m_stageAtTimeInterval; // opaque handle into the C-ABI layer
    static carb::flatcache::IStageAtTimeInterval* sIStageAtTimeInterval();

public:
    // The constructor locks frames of history
    StageAtTimeInterval(StageWithHistory& stageWithHistory,
                        RationalTime beginTime,
                        RationalTime endTime,
                        bool includeEndTime = false);
    StageAtTimeInterval(StageWithHistoryId stageWithHistoryId,
                        RationalTime beginTime,
                        RationalTime endTime,
                        bool includeEndTime = false);

    ValidMirrors getAttributeValidBits(const PathC& path, const TokenC& attrName) const;

    // Get values of locked elements (one pointer per locked time sample)
    template <typename T>
    std::vector<const T*> getAttributeRd(const Path& path, const Token& attrName) const;

    // Get GPU pointer and size of locked elements
    template <typename T>
    std::vector<const T*> getAttributeRdGpu(const Path& path, const Token& attrName) const;

    // Get the size of an array attribute. When writing CPU code, it isn't
    // normally necessary to use this method, as getArrayAttribute returns a
    // span containing the data pointer and the size.
    // However, when writing mixed CPU/GPU code it is wasteful to copy the
    // array data from GPU to CPU when just the size is required, so use this
    // method in that case.
    std::vector<size_t> getArrayAttributeSize(const Path& path, const Token& attrName) const;

    /**
     * @brief Get an array-valued attribute for reading from a single prim
     *
     * @param path The path of the prim
     * @param attrName The name of the attribute
     *
     * @return a vector of array spans, one for each time sample within the current StageAtTimeInterval
     */
    template <typename T>
    std::vector<gsl::span<const T>> getArrayAttributeRd(const Path& path, const Token& attrName) const;

    /**
     * @brief Get an array-valued attribute as bytes for reading from a single prim.
     * This is useful for converting to VtValue
     *
     * @param path The path of the prim
     * @param attrName The name of the attribute
     *
     * @return a vector of array spans, one for each time sample within the
     * current StageAtTimeInterval
     */
    std::vector<ConstArrayAsBytes> getArrayAttributeRawRd(const Path& path, const Token& attrName) const;

    // Get timestamps of locked elements
    std::vector<RationalTime> getTimestamps() const;

    size_t getTimeSampleCount() const;

    // findPrims() finds prims that have all the attributes in "all", and any
    // of the attributes in "any", and none of the attributes in "none".
    // The attributes of the resulting prims can be accessed as piecewise
    // contiguous arrays, using getAttributeArray() below, which is typically
    // faster than calling getAttribute for each prim.
    PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
                             const carb::flatcache::set<AttrNameAndType>& any = {},
                             const carb::flatcache::set<AttrNameAndType>& none = {});

    // getAttributeArray(primBucketList, index, attrName) returns for each
    // timesample, a read-only, contiguous array of the values of attribute
    // "attrName" for each prim of bucket "index" of "primBucketList".
    // "index" must be in the range [0..primBucketList.getBucketCount())
    template <typename T>
    std::vector<gsl::span<const T>> getAttributeArrayRd(const PrimBucketList& primBucketList,
                                                        size_t primBucketListIndex,
                                                        const Token& attrName) const;

    template <typename T>
    std::vector<gsl::span<const T>> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
                                                           size_t primBucketListIndex,
                                                           const Token& attrName) const;

    template <typename T>
    std::vector<std::vector<gsl::span<const T>>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
                                                                          size_t primBucketListIndex,
                                                                          const Token& attrName) const;

    /**
     * @brief Read a raw byte representation for a given attribute from a given bucket. This is useful for doing things such as batched type conversions.
     *
     * @param primBucketList the list of buckets
     * @param primBucketListIndex the specific bucket to search
     * @param attrName the token describing the desired attribute
     *
     * @return a vector of byte arrays, one for each time sample within the current StageAtTimeInterval
     */
    std::vector<gsl::span<const char>> getAttributeArrayRawRd(const PrimBucketList& primBucketList,
                                                              size_t primBucketListIndex,
                                                              const Token& attrName) const;

    // getPathArray(primBucketList, index) returns for each timesample a
    // read-only contiguous array of the paths of the prims of bucket "index"
    // of "primBucketList".
    // "index" must be in the range [0..primBucketList.getBucketCount())
    // The reason a separate path array is returned per sample is that prims
    // can be added and deleted from frame to frame, and we need to check which
    // prim a sample corresponds to when interpolating.
    std::vector<gsl::span<const Path>> getPathArray(const PrimBucketList& primBucketList,
                                                    size_t primBucketListIndex) const;

    /**
     * @brief Get a Connection on a target prim
     *
     * @param path the target prim
     * @param connectionName the connection name
     * @return a vector of read only pointers to connections
     */
    std::vector<const Connection*> getConnectionRd(const Path& path, const Token& connectionName);

    // TODO: replace with an iterator for iterating over bucket names
    void printBucketNames() const;

    /**
     * @brief write the current data for this stageInProgress to the specified UsdStage
     * this will write all attributes at the currentTime in getFrameNumber()
     *
     * @param usdStageId Valid usdStage in the stage cache
     *
     * @return none
     */
    void exportUsd(UsdStageId usdStageId) const;

    // Get the number of attributes for a given bucket.
    std::vector<size_t> getAttributeCounts(const PrimBucketList& primBucketList,
                                           size_t primBucketListIndex) const;

    // Get the name and type of each attribute for a given bucket.
    std::pair<std::vector<std::vector<Token>>, std::vector<std::vector<Type>>> getAttributeNamesAndTypes(
        const PrimBucketList& primBucketList, size_t primBucketListIndex) const;

    /** @brief Write a cache file to disk at a specified location
     *
     * @note Many parameters to this function are optional
     * @note This currently only writes the first time in the interval
     *
     * @param[in] file The location the file is desired to be written to
     * @param[in] workingBuffer [Optional] In order to avoid costly reallocations
     *            the code will attempt to re-use the memory at the buffer
     *            location if it is large enough. If the buffer isn't large
     *            enough the cost of allocation, and re-traversal may be paid
     * @param[in] workingBufferSize [Optional] If workingBuffer is non null, then this describes the length
     *            of the buffer
     * @return The amount of data needed to serialize the cache, a return value of 0 indicates an error
     */
    uint64_t writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const;

    /** @brief Add a ref count to any data backed by the StageAtTimeInterval
     *
     * @note The ref count will not enforce any behavior currently, but will
     *       print a warning if backing data is deleted before all ref counts
     *       are cleared
     *
     * @return None
     */
    void addRefCount();

    /** @brief Remove a ref count from an existing timeInterval
     *
     * @return True if ref count was removed successfully, failure conditions may
     *         include
     *         (1) StageAtTimeInterval doesn't exist
     *         (2) RefCount was already 0
     */
    bool removeRefCount();

    /** @brief Query ref count for a stage at time
     *
     * @note A stage at time might be represented by multiple actual data sources
     *       in that case we return the largest refcount of all the data sources
     *
     * @return number of reference counts
     */
    unsigned int getRefCount();

    // Unlocks elements to allow them to be reused.
    ~StageAtTimeInterval();
};

// StageAtTime is used when the state of a stage is needed at
// a particular point in time, which may or may not be one of the times sampled
// in the history. If it is, then getAttributeRd returns the exact value sampled.
// If not, it linearly interpolates using the two closest samples in the history.
//
// StageAtTime takes an RAII approach to locking, constructing one
// locks one or two frames in the history (depending on whether interpolation
// is needed), and destructing unlocks them.
class StageAtTime
{
    // Invariants:
    // I0: if sampleTimes.size()==2, m_theta = (m_time - sampleTimes[0]) /
    //     (sampleTimes[1] - sampleTimes[0])
    //     where sampleTimes = m_historyWindow.getTimestamps()
    //
    //     In particular, m_theta increases linearly from 0 to 1 as m_time
    //     increases from sampleTimes[0] to sampleTimes[1]
    //
    // TODO: do we need to delay conversion from rational number to double?
    StageAtTimeInterval m_historyWindow; // the one or two locked history frames
    RationalTime m_time;                 // the query time
    double m_theta;                      // interpolation parameter in [0, 1]

    // Compute m_theta from the locked sample times (see invariant I0).
    // NOTE(review): if getTimestamps() returns zero samples, m_theta is left
    // uninitialized here — confirm callers never interpolate in that case.
    void initInterpolation()
    {
        std::vector<RationalTime> sampleTimes = m_historyWindow.getTimestamps();
        if (sampleTimes.size() == 2)
        {
            // Guard against division by zero when converting rationals to doubles
            if ((double)sampleTimes[0].denominator == 0.0 || (double)sampleTimes[1].denominator == 0.0)
            {
                CARB_LOG_WARN_ONCE("StageWithHistory initInterpolation(): cannot divide by a denominator with a value of zero.");
                m_theta = 0.0;
            }
            else
            {
                double a_t = (double)sampleTimes[0].numerator / (double)sampleTimes[0].denominator;
                double b_t = (double)sampleTimes[1].numerator / (double)sampleTimes[1].denominator;
                if (a_t == b_t)
                    m_theta = 0.0;
                else
                {
                    double c_t = (double)m_time.numerator / (double)m_time.denominator;
                    m_theta = (c_t - a_t) / (b_t - a_t);
                }
            }
        }
        else if (sampleTimes.size() == 1)
            m_theta = 0.0;
    }

public:
    // Locks one or two history elements for read.
    StageAtTime(StageWithHistory& stageWithHistory, RationalTime time)
        : m_historyWindow(stageWithHistory, time, time, true), m_time(time)
    {
        initInterpolation();
    }

    StageAtTime(StageWithHistoryId stageWithHistoryId, RationalTime time)
        : m_historyWindow(stageWithHistoryId, time, time, true), m_time(time)
    {
        initInterpolation();
    }

    // Auxiliary method to communicate attributes of types which will not be interpolated
    // Supported types: bool, int, uint
    // no samples found: return nullopt
    // samples found: return pair{value of sample in frame n, value of sample in frame n+1}
    template <typename T>
    optional<std::pair<optional<T>, optional<T>>> getNonInterpolatableAttributeRd(const Path& path,
                                                                                  const Token& attrName) const;

    ValidMirrors getAttributeValidBits(const PathC& path, const TokenC& attrName) const;

    // Read interpolated elements
    template <typename T>
    optional<T> getAttributeRd(const Path& path, const Token& attrName) const;

    // Read GPU elements (interpolation not supported yet!)
    template <typename T>
    const T* getAttributeRdGpu(const Path& path, const Token& attrName) const;

    // Get array attribute size, useful for GPU attributes
    size_t getArrayAttributeSize(const Path& path, const Token& attrName) const;

    // Get array attribute for reading
    template <typename T>
    gsl::span<const T> getArrayAttributeRd(const Path& path, const Token& attrName);

    // findPrims() finds prims that have all the attributes in "all", and any
    // of the attributes in "any", and none of the attributes in "none".
    // The attributes of the resulting prims can be accessed as piecewise
    // contiguous arrays, using getAttributeArray() below, which is typically
    // faster than calling getAttribute for each prim.
    PrimBucketList findPrims(const carb::flatcache::set<AttrNameAndType>& all,
                             const carb::flatcache::set<AttrNameAndType>& any = {},
                             const carb::flatcache::set<AttrNameAndType>& none = {})
    {
        // Delegate to the underlying locked interval
        return m_historyWindow.findPrims(all, any, none);
    }

    // getAttributeArray(primBucketList, index, attrName) returns a read-only
    // contiguous array of the values of attribute "attrName" for each prim of
    // bucket "index" of "primBucketList".
    // "index" must be in the range [0..primBucketList.getBucketCount())
    template <typename T>
    AttributeArrayResult<T> getAttributeArrayRd(const PrimBucketList& primBucketList,
                                                size_t primBucketListIndex,
                                                const Token& attrName) const;

    template <typename T>
    AttributeArrayResult<T> getAttributeArrayRdGpu(const PrimBucketList& primBucketList,
                                                   size_t primBucketListIndex,
                                                   const Token& attrName) const;

    template <typename T>
    AttributeArrayResult<std::vector<T>> getArrayAttributeArrayRd(const PrimBucketList& primBucketList,
                                                                  size_t primBucketListIndex,
                                                                  const Token& attrName) const;

    /**
     * @brief Read a raw byte representation for a given attribute from a given bucket. This is useful for doing things such as batched type conversions.
     *
     * @param primBucketList the list of buckets
     * @param primBucketListIndex the specific bucket to search
     * @param attrName the token describing the desired attribute
     *
     * @return a vector of byte arrays, one for each time sample underlying the current StageAtTime. Note: Does not perform any interpolation.
     */
    std::vector<gsl::span<const char>> getAttributeArrayRawRd(const PrimBucketList& primBucketList,
                                                              size_t primBucketListIndex,
                                                              const Token& attrName) const;

    // getPathArray(primBucketList, index) returns a read-only contiguous array
    // of the paths of the prims of bucket "index" of "primBucketList".
    // "index" must be in the range [0..primBucketList.getBucketCount())
    gsl::span<const Path> getPathArray(const PrimBucketList& primBucketList,
                                       size_t primBucketListIndex) const;

    /**
     * @brief Get a read only pointer to a connection on a prim
     *
     * @param path the target prim
     * @param connectionName the target connection
     * @return returns a vector of read only pointers to connections, one per time sample
     */
    std::vector<const Connection*> getConnectionRd(const Path& path, const Token& connectionName);

    // TODO: replace with an iterator for iterating over bucket names
    void printBucketNames() const;

    // Get the number of attributes for a given bucket.
    size_t getAttributeCount(const PrimBucketList& primBucketList, size_t primBucketListIndex) const;

    // Get the name and type of each attribute for a given bucket.
    std::pair<std::vector<Token>, std::vector<Type>> getAttributeNamesAndTypes(
        const PrimBucketList& primBucketList, size_t primBucketListIndex) const;

    // Unlocks elements to allow them to be reused.
    ~StageAtTime() = default;

    /** @brief Write a cache file to disk at a specified location
     *
     * @note Many parameters to this function are optional
     *
     * @param[in] file The location the file is desired to be written to
     * @param[in] workingBuffer [Optional] In order to avoid costly reallocations
     *            the code will attempt to re-use the memory at the buffer
     *            location if it is large enough. If the buffer isn't large
     *            enough the cost of allocation, and re-traversal may be paid
     * @param[in] workingBufferSize [Optional] If workingBuffer is non null, then this describes the length
     *            of the buffer
     * @return The amount of data needed to serialize the cache, a return value of 0 indicates an error
     */
    uint64_t writeCacheToDisk(const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize) const;

    /** @brief Add a ref count to any data backed by the StageAtTimeInterval
     *
     * @note The ref count will not enforce any behavior currently, but will
     *       print a warning if backing data is deleted before all ref counts
     *       are cleared
     *
     * @return None
     */
    void addRefCount();

    /** @brief Remove a ref count from an existing timeInterval
     *
     * @return True if ref count was removed successfully, failure conditions may
     *         include
     *         (1) StageAtTimeInterval doesn't exist
     *         (2) RefCount was already 0
     */
    bool removeRefCount();

    /** @brief Query ref count for a stage at time
     *
     * @note A stage at time might be represented by multiple actual data sources
     *       in that case we return the largest refcount of all the data sources
     *
     * @return number of reference counts
     */
    unsigned int getRefCount();
};

// Finally, here is the main class, StageWithHistory.
class StageWithHistory
{
    StageWithHistoryId m_stageWithHistory; // opaque handle into the C-ABI layer
    UsdStageId m_usdStageId;               // the USD stage this history shadows
    friend class StageInProgress;
    friend class StageAtTimeInterval;

public:
    StageWithHistory(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod, bool withCuda = false);
    ~StageWithHistory();

    /**
     * Create a listener
     * This just creates a listener ID, you have to attach it to a stage to use it.
     * Note that there is no destroyListener method. To stop using an ID, detach it from all stages it is attached to.
     * @return The listener
     */
    ListenerId createListener();
};

// Sentinel value for "no listener".
const ListenerId kInvalidListenerId = { 0 };

} // namespace flatcache
} // namespace carb

// Implement above C++ methods by calling C-ABI interfaces
#include "WrapperImpl.h"
omniverse-code/kit/fabric/include/carb/flatcache/USDValueAccessors.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "UsdPCH.h" #include <carb/Defines.h> #include <carb/InterfaceUtils.h> #include <carb/Types.h> #include <carb/flatcache/FlatCacheUSD.h> #include <carb/flatcache/IPath.h> #include <carb/flatcache/PathToAttributesMap.h> #include <carb/flatcache/StageWithHistory.h> #include <iostream> #include <vector> namespace carb { namespace flatcache { // A TfNotice sent with a vector of paths for attribute that // have changed. Sent by StateInProgress upon request, contains // the paths of attributes that the StageInProgress has flagged // as modified during it's lifetime, which is typically one frame. // // primPaths and attributeNames are required to the same length, // the prim and attribute name within that prim whose value // changed class AttributeValuesChangedNotice : public pxr::TfNotice { public: AttributeValuesChangedNotice(const std::vector<pxr::SdfPath>& primPaths, const std::vector<pxr::TfToken>& attributeNames) : _primPaths(primPaths), _attributeNames(attributeNames) { } ~AttributeValuesChangedNotice() { } const std::vector<pxr::SdfPath>& GetPrimPaths() const { return _primPaths; } const std::vector<pxr::TfToken>& GetAttributeNames() const { return _attributeNames; } private: const std::vector<pxr::SdfPath> _primPaths; const std::vector<pxr::TfToken> _attributeNames; }; void broadcastTfNoticeForAttributesChanged(StageInProgressId stageInProgressId); template <typename T> T getValue(const pxr::UsdAttribute& attribute, const pxr::UsdTimeCode& timeCode) { // First, look in flatcache to see if a value is present. 
If not, fall back // to read USD's composed attribute value. { // read from flatcache via StageInProgress, this is called during a run // loop where extensions are modifying one timeslice within StageWithHisotry // Look up the long int identifier for the attribute's UsdStage auto usdStageId = PXR_NS::UsdUtilsStageCache::Get().GetId(attribute.GetStage()).ToLongInt(); // grab the carb interface for StageInProgress and use it to access the // (potentially NULL) current stageInProgress for the UsdStage auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); auto stageInProgress = iStageInProgress->get(usdStageId); if (stageInProgress.id) { // Grab a pointer to in-memory representation for the attribute value, in this // case a pointer to a T. Will be NULL if attribute doesn't exist in flatcache auto valueSpan = iStageInProgress->getAttribute(stageInProgress, carb::flatcache::asInt(attribute.GetPrimPath()), carb::flatcache::asInt(attribute.GetName())); T* valuePtr = (T*)valueSpan.ptr; if (valuePtr) { // We have a value stored for this attribute in flatcache, return it return *valuePtr; } } } // If we get here we didn't find a value stored for this attribute in flatcache, // so call USD API pxr::VtValue val; attribute.Get(&val, timeCode); return val.UncheckedGet<T>(); } template <typename T_VALUETYPE> void setFlatCacheValue(const pxr::UsdAttribute& attribute, T_VALUETYPE value, bool writeToUSD) { if (writeToUSD) { // write to the USD layer attribute.Set(value); } else { // write to flatcache, via StageInProgress // grab const references to the path of the attribute's parent // prim and the name of the attribute. Avoid copies here. 
const pxr::SdfPath& path = attribute.GetPrimPath(); const pxr::TfToken& name = attribute.GetName(); const pxr::SdfPath& attrPath = attribute.GetPath(); // Convert the bits into a carb-safe value auto pathId = carb::flatcache::asInt(path); auto nameId = carb::flatcache::asInt(name); // Look up the long int identifier for the attribute's UsdSage auto usdStageId = carb::flatcache::UsdStageId{ (uint64_t)PXR_NS::UsdUtilsStageCache::Get().GetId(attribute.GetStage()).ToLongInt() }; // grab the carb interface for StageInProgress and use it to access the // (potentially NULL) current stageInProgress for the UsdStage auto iStageInProgress = carb::getCachedInterface<carb::flatcache::IStageInProgress>(); auto stageInProgress = iStageInProgress->get(usdStageId); if (!stageInProgress.id) { // No one created a stageInProgress, we're expecting this // to be created by another extension or run loop // // XXX: warn, or return falsse? return; } // Grab a pointer to in-memory representation for the attribute value, in this // case a pointer to a float auto valuePtr = iStageInProgress->getAttribute(stageInProgress, pathId, nameId); // Set the value within stageInProgress ((T_VALUETYPE*)valuePtr.ptr)[0] = value; } } // This should be in UsdValueAccessors.cpp, but when it goes there // clients in DriveSim can't find the symbol. Needs fixing. inline void setFlatCacheValueFloat(const pxr::UsdAttribute& attribute, float value, bool writeToUSD) { setFlatCacheValue<float>(attribute, value, writeToUSD); } } }
omniverse-code/kit/fabric/include/carb/flatcache/IFlatcache.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "IPath.h"

#include <carb/Interface.h>
#include <carb/flatcache/AttrNameAndType.h>
#include <carb/flatcache/IdTypes.h>
#include <carb/flatcache/Ordered_Set.h>
#include <carb/flatcache/RationalTime.h>

#include <map>
#include <stdint.h>

namespace carb
{
namespace flatcache
{

// Identifier for a UsdStage, used by the C-ABI interfaces below to select the
// stage an operation applies to. A plain standard-layout wrapper around a
// uint64_t so it can be passed by value across the plugin boundary.
struct UsdStageId
{
    uint64_t id;

    constexpr bool operator<(const UsdStageId& other) const
    {
        return id < other.id;
    }
    constexpr bool operator==(const UsdStageId& other) const
    {
        return id == other.id;
    }
    constexpr bool operator!=(const UsdStageId& other) const
    {
        return id != other.id;
    }
};
static_assert(std::is_standard_layout<UsdStageId>::value,
              "Struct must be standard layout as it is used in C-ABI interfaces");

// Sentinel value: id 0 means "no stage".
static constexpr UsdStageId kUninitializedStage{ 0 };
}
}

namespace std
{
// Hash support so UsdStageId can key std::unordered_map / std::unordered_set.
template <>
class hash<carb::flatcache::UsdStageId>
{
public:
    size_t operator()(const carb::flatcache::UsdStageId& key) const
    {
        return key.id;
    }
};
}

namespace carb
{
namespace flatcache
{

// Identifier for a bucket (a group of prims sharing the same attribute set).
// Ordered and incrementable so bucket ids can be iterated over a range.
struct BucketId
{
    uint64_t id;

    constexpr bool operator<(const BucketId& other) const
    {
        return id < other.id;
    }
    constexpr bool operator<=(const BucketId& other) const
    {
        return id <= other.id;
    }
    constexpr bool operator==(const BucketId& other) const
    {
        return id == other.id;
    }
    constexpr bool operator!=(const BucketId& other) const
    {
        return id != other.id;
    }
    constexpr BucketId& operator++()
    {
        ++id;
        return *this;
    }
    constexpr BucketId& operator--()
    {
        --id;
        return *this;
    }
    // Explicit so a BucketId is not silently usable as an array index.
    constexpr explicit operator size_t() const
    {
        return id;
    }
};
static_assert(std::is_standard_layout<BucketId>::value,
              "Struct must be standard layout as it is used in C-ABI interfaces");

// Sentinel value marking an invalid/unset bucket.
static constexpr BucketId kInvalidBucketId{ 0xffff'ffff'ffff'ffff };

// A struct that represents a subset of a bucket, optionally filtered by
// attribute names and/or prim paths.
struct BucketSubset
{
    BucketId bucket; // The target bucket from which we define a subset
    set<TokenC>* attributes; // The subset of attributes to consider - only used if allAttributes == false, MUST be set otherwise
    set<PathC>* paths; // The subset of paths to consider - only used if allPaths == false, MUST be set otherwise
    bool allAttributes; // true: take every attribute in the bucket; false: filter by *attributes
    bool allPaths; // true: take every prim in the bucket; false: filter by *paths
};
static_assert(std::is_standard_layout<BucketSubset>::value,
              "BucketSubset must be standard layout as it is used in C-ABI interfaces");
}
}

namespace std
{
// Hash support so BucketId can key std::unordered_map / std::unordered_set.
template <>
class hash<carb::flatcache::BucketId>
{
public:
    size_t operator()(const carb::flatcache::BucketId& key) const
    {
        return key.id;
    }
};
}

namespace carb
{
namespace flatcache
{

// Flatcache stores data in untyped (byte) arrays.
// For conversion back to typed arrays, getArraySpan methods return the
// element size in bytes. They also return elementCount to allow the caller to
// wrap the array in std::span, or bounds check array access themselves.
// Flatcache methods can't return std::span or gsl::span directly, because they
// are not C-ABI compatible. So we define SpanC/ConstSpanC, which are.
struct ConstSpanC { const uint8_t* ptr; size_t elementCount; size_t elementSize; }; struct SpanC { uint8_t* ptr; size_t elementCount; size_t elementSize; // Casting SpanC to ConstSpanC is allowed, but not vice versa operator ConstSpanC() const { return { ptr, elementCount, elementSize }; } }; struct ConstSpanWithTypeC { const uint8_t* ptr; size_t elementCount; size_t elementSize; TypeC type; }; struct SpanWithTypeC { uint8_t* ptr; size_t elementCount; size_t elementSize; TypeC type; // Casting SpanWithTypeC to ConstSpanWithTypeC is allowed, but not vice versa operator ConstSpanWithTypeC() const { return { ptr, elementCount, elementSize, type }; } }; struct SpanSizeC { size_t* ptr; size_t elementCount; }; struct ConstSpanSizeC { const size_t* ptr; size_t elementCount; }; // An ArrayPointersAndSizesC is an array of immutably sized mutable // data arrays // // Rules (enforced by const): // { // ArrayPointersAndSizesC ps; // // // Allowed: Changing inner array values // ps.arrayPtrs[0][0] = 1 // // // Disallowed: Changing array pointers // ps.arrayPtrs[0] = (uint8_t*)p; // // // Disallowed: Changing inner array sizes // ps.sizes[0] = 1; // } struct ArrayPointersAndSizesC { uint8_t* const* arrayPtrs; const size_t* sizes; const size_t elementCount; }; // A ConstArrayPointersAndSizesC is an array of immutably sized immutable // data arrays // // Rules (enforced by const): // { // ConstArrayPointersAndSizesC ps; // // // Disallowed: Changing inner array values // ps.arrayPtrs[0][0] = 1 // // // Disallowed: Changing array pointers // ps.arrayPtrs[0] = (uint8_t*)p; // // // Disallowed: Changing inner array sizes // ps.sizes[0] = 1; // } struct ConstArrayPointersAndSizesC { const uint8_t* const* arrayPtrs; const size_t* sizes; size_t elementCount; }; static_assert(std::is_standard_layout<Path>::value, "Path must be standard layout as it is used in C-ABI interfaces"); struct ConstPathCSpan { const Path* ptr; size_t elementCount; }; struct ConstAttrNameAndTypeSpanC { const 
AttrNameAndType* ptr; size_t elementCount; }; struct ConstChangedIndicesC { bool allIndicesChanged; ConstSpanSizeC changedIndices; }; struct ConstChangedIndicesSpanC { const ConstChangedIndicesC* ptr; size_t elementCount; }; struct BucketChangesC { // Which attributes changed flatcache::ConstAttrNameAndTypeSpanC changedAttributes; // For each attribute, which prims changed? flatcache::ConstChangedIndicesSpanC changedIndices; flatcache::ConstPathCSpan pathArray; }; struct AddedPrimIndicesC { // Which prims were added? flatcache::ConstSpanSizeC addedIndices; }; struct StageWithHistorySnapshot { bool valid; size_t id; }; enum class ValidMirrors { eNone = 0, eCPU = 1, eCudaGPU = 2, eGfxGPU = 4 }; constexpr enum ValidMirrors operator|(const enum ValidMirrors a, const enum ValidMirrors b) { return (enum ValidMirrors)(uint32_t(a) | uint32_t(b)); } constexpr enum ValidMirrors operator&(const enum ValidMirrors a, const enum ValidMirrors b) { return (enum ValidMirrors)(uint32_t(a) & uint32_t(b)); } using PrimBucket = carb::flatcache::set<AttrNameAndType>; // // Note when extending the interface please add to the end so // that dependencies don't break as easily before they are rebuilt // struct IStageInProgress { CARB_PLUGIN_INTERFACE("carb::flatcache::IStageInProgress", 0, 2); StageInProgressId(CARB_ABI* create)(UsdStageId usdStageId, size_t simFrameNumber); StageInProgressId(CARB_ABI* get)(UsdStageId usdStageId); void(CARB_ABI* destroy)(UsdStageId usdStageId); size_t(CARB_ABI* getFrameNumber)(StageInProgressId stageId); // Prefetch prim from USD stage // This guarantees that subsequent gets of the prim from the cache will succeed void(CARB_ABI* prefetchPrim)(UsdStageId usdStageId, PathC path); // Get attribute for read/write access SpanC(CARB_ABI* getAttribute)(StageInProgressId stageId, PathC path, TokenC attrName); // Get attribute for read only access ConstSpanC(CARB_ABI* getAttributeRd)(StageInProgressId stageId, PathC path, TokenC attrName); // Get attribute for write 
only access SpanC(CARB_ABI* getAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName); // Get attribute for write only access, creating it if necessary SpanC(CARB_ABI* getOrCreateAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC typeC); size_t(CARB_ABI* getArrayAttributeSize)(StageInProgressId stageId, PathC path, TokenC attrName); void(CARB_ABI* setArrayAttributeSize)(StageInProgressId stageId, PathC path, TokenC attrName, size_t elemCount); SpanC(CARB_ABI* setArrayAttributeSizeAndGet)(StageInProgressId stageId, PrimBucketListId primBucketList, size_t primBucketListIndex, size_t indexInBucket, TokenC attrName, size_t newElemCount); // Get an attribute's type Type(CARB_ABI* getType)(StageInProgressId stageId, PathC path, TokenC attrName); // Get prim's attribute count size_t(CARB_ABI* getAttributeCount)(StageInProgressId stageId, PathC path); // Get the names of a prim's attributes void(CARB_ABI* getAttributeNamesAndTypes)(Token* outNames, Type* outTypes, size_t outCount, StageInProgressId stageInProgressId, PathC path); // Attribute/prim create/destroy void(CARB_ABI* createPrim)(StageInProgressId stageId, PathC path); void(CARB_ABI* destroyPrim)(StageInProgressId stageId, PathC path); void(CARB_ABI* createAttribute)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC type); void(CARB_ABI* createAttributes)( StageInProgressId stageId, PathC path, TokenC* attrNames, TypeC* types, uint32_t attrNameAndTypeCount); // Deprecated as type attribute is not required! 
void(CARB_ABI* destroyAttribute)(StageInProgressId stageId, PathC path, TokenC attrName, TypeC type); // see new destroyAttribute and destroyAttributes functions at end of IFlatcache // Attribute SOA accessors PrimBucketListId(CARB_ABI* findPrims)(StageInProgressId stageInProgressId, const carb::flatcache::set<AttrNameAndType>& all, const carb::flatcache::set<AttrNameAndType>& any, const carb::flatcache::set<AttrNameAndType>& none); void(CARB_ABI* getAttributeArray)(SpanC* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getAttributeArrayRd)(ConstSpanC* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getAttributeArrayWr)(SpanC* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getOrCreateAttributeArrayWr)(SpanC* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName, TypeC typeC); size_t(CARB_ABI* getBucketPrimCount)(StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); size_t(CARB_ABI* getBucketAttributeCount)(StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex); void(CARB_ABI* getBucketAttributeNamesAndTypes)(AttrNameAndType* out, size_t outCount, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex); ConstSpanSizeC(CARB_ABI* getArrayAttributeSizeArrayRd)(StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); ArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizes)(StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); ConstArrayPointersAndSizesC(CARB_ABI* 
getArrayAttributeArrayWithSizesRd)(StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); ArrayPointersAndSizesC(CARB_ABI* getArrayAttributeArrayWithSizesWr)(StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getPathArray)(ConstPathCSpan* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex); void(CARB_ABI* printBucketNames)(StageInProgressId stageInProgressId); void(CARB_ABI* createForAllStages)(size_t simFrameNumber); void(CARB_ABI* destroyForAllStages)(); void(CARB_ABI* logAttributeWriteForNotice)(StageInProgressId stageId, PathC path, TokenC attrName); // Broadcast a USD TfNotice to all registered listeners containing paths of // all attributes passed to logAttributeWriteForNotice since this StageInProgress was constructed. // This is used, for example, to send changes to PhysX. void(CARB_ABI* broadcastTfNoticeForAttributesChanged)(StageInProgressId stageInProgressId); PrimBucketListId(CARB_ABI* getChanges)(StageInProgressId stageInProgressId, ListenerId listenerId); void(CARB_ABI* popChanges)(StageInProgressId stageInProgressId, ListenerId listenerId); RationalTime(CARB_ABI* getFrameTime)(StageInProgressId stageId); /** @brief Get a Span with a pointer to the head of the relevant array of data * with elementCount and elementSize reflecting the underlying data * * @stageId[in] Id for the stage to look in * @path[in] Path to the prim holding the attribute * @name[in] Name of the array attribute * * @return If a valid prim/attribute that hold an array returns a valid span, otherwise * returns an empty span. 
* */ SpanC(CARB_ABI* getArrayAttribute)(StageInProgressId stageId, PathC path, TokenC attrName); /** @brief Get a const Span with a pointer to the head of the relevant array of data * with elementCount and elementSize reflecting the underlying data * * @stageId[in] Id for the stage to look in * @path[in] Path to the prim holding the attribute * @name[in] Name of the array attribute * * @return If a valid prim/attribute that hold an array returns a valid span, otherwise * returns an empty span. * */ ConstSpanC(CARB_ABI* getArrayAttributeRd)(StageInProgressId stageId, PathC path, TokenC attrName); /** @brief Get a Span with a pointer to the head of the relevant array of data * with elementCount and elementSize reflecting the underlying data * * @stageId[in] Id for the stage to look in * @path[in] Path to the prim holding the attribute * @name[in] Name of the array attribute * * @return If a valid prim/attribute that hold an array returns a valid span, otherwise * returns an empty span. * */ SpanC(CARB_ABI* getArrayAttributeWr)(StageInProgressId stageId, PathC path, TokenC attrName); /** @brief Destroy attribute with matching name * * Overloads and superseeds destroyAttribute which takes a unnecessary attribute type. 
* * @stageId[in] Id for the stage to look in * @path[in] Path to the prim holding the attribute * @attrNames[in] Attribute name * */ void(CARB_ABI* destroyAttribute2)(StageInProgressId stageId, PathC path, TokenC attrName); /** @brief Destroy all attributes with matching names * * @stageId[in] Id for the stage to look in * @path[in] Path to the prim holding the attribute * @attrNames[in] Attribute name array * @attrNames[in] Attribute name array count * */ void(CARB_ABI* destroyAttributes)(StageInProgressId stageId, PathC path, TokenC* attrNames, uint32_t attrNameCount); void(CARB_ABI* getAttributeArrayGpu)(SpanC* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getAttributeArrayRdGpu)(ConstSpanC* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getAttributeArrayWrGpu)(SpanC* out, StageInProgressId stageInProgressId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); // Get GPU attribute for read/write access SpanC(CARB_ABI* getAttributeGpu)(StageInProgressId stageId, PathC path, TokenC attrName); // Get GPU attribute for read only access ConstSpanC(CARB_ABI* getAttributeRdGpu)(StageInProgressId stageId, PathC path, TokenC attrName); // Get GPU attribute for write only access SpanC(CARB_ABI* getAttributeWrGpu)(StageInProgressId stageId, PathC path, TokenC attrName); /** @brief Returns which mirrors of the array are valid: CPU, GPU, etc. 
* * @stageId[in] The stage to query validity from * @path[in] The prim path * @attrName[in] The attribute name * * @return ValidMirrors struct * */ ValidMirrors(CARB_ABI* getAttributeValidBits)(StageInProgressId stageId, const PathC& path, const TokenC& attrName); // Connection API /** * @brief Create a connection on a prim * * @param[in] stageId the stage to work on * @param[in] path the prim to create the connection on * @param[in] connectionName the name of the connection attribute * @param[in] connection the target prim and attribute for the connection */ void(CARB_ABI* createConnection)(StageInProgressId stageId, PathC path, TokenC connectionName, Connection connection); /** * @brief Create multiple connections on a prim * * @param[in] stageId the stage to work on * @param[in] path the prim to create the connection on * @param[in] connectionNames the name of the connection attributes to create * @param[in] connection the target prim and attribute for the connections * @param[in] connectionCount the number of connections to be created. */ void(CARB_ABI* createConnections)(StageInProgressId stageId, PathC path, const TokenC* connectionNames, const Connection* connections, size_t connectionCount); /** * @brief remove a connection on a prim * * @param[in] stageId the stage to work on * @param[in] path the prim to remove the connection from * @param[in] connectionName the name of the connection attribute */ void(CARB_ABI* destroyConnection)(StageInProgressId stageId, PathC path, TokenC connectionName); /** * @brief Remove multiple connections from a prim * * @param[in] stageId the stage to work on * @param[in] path the prim to remove the connections from * @param[in] connectionNames the name of the connection attributes to be removed * @param[in] connectionCount the number of connections to be removed. 
*/ void(CARB_ABI* destroyConnections)(StageInProgressId stageId, PathC path, const TokenC* connectionNames, size_t connectionCount); /** * @brief Retrieves a connection attribute from a prim * * @param[in] stageId the stage to work on * @param[in] path the prim to fetch the connection from * @param[in] connectionName the name of the connection attribute to fetch * @return a read/write pointer to the connection */ Connection*(CARB_ABI* getConnection)(StageInProgressId stageId, PathC path, TokenC connectionName); /** * @brief Retrieves a connection attribute from a prim * * @param[in] stageId the stage to work on * @param[in] path the prim to fetch the connection from * @param[in] connectionName the name of the connection attribute to fetch * @return a read only pointer to the connection */ const Connection*(CARB_ABI* getConnectionRd)(StageInProgressId stageId, PathC path, TokenC connectionName); /** * @brief Retrieves a connection attribute from a prim * * @param[in] stageId the stage to work on * @param[in] path the prim to fetch the connection from * @param[in] connectionName the name of the connection attribute to fetch * @return a write only pointer to the connection */ Connection*(CARB_ABI* getConnectionWr)(StageInProgressId stageId, PathC path, TokenC connectionName); /** * @brief Copy all attributes from the source prim to the destination prim * Will create attributes if they do not exist on the destination prim * If an attribute exists on both prims they must have compatible types to copy. 
* * @param[in] stageId the stage id to use for copying * @param[in] srcPath the source prim * @param[in] dstPath the destination prim */ void(CARB_ABI* copyAllAttributes)(StageInProgressId stageId, PathC srcPath, PathC dstPath); /** * @brief Copy the specified attributes from the source prim to the the specified * attributes on the destination prim * Will create attributes if they do not exist on the destination prim * If an attribute exists on both prims they must have compatible types to copy. * Note: The srcAttrs and dstAttrs must be the same size as the function assumes * that the copy is 1 to 1 in terms of name alignment * * @param[in] stageId the stage id to use for copying * @param[in] srcPath the source prim * @param[in] srcAttrs a vector of attributes to be copied. * @param[in] dstPath the destination prim * @param[in] dstAttrs a vector of attributes to be copied. * @param[in] count the number of attributes to copy */ void(CARB_ABI* copySpecifiedAttributes)(StageInProgressId stageId, PathC srcPath, const TokenC* srcAttrs, PathC dstPath, const TokenC* dstAttrs, size_t count); }; struct IStageAtTimeInterval { CARB_PLUGIN_INTERFACE("carb::flatcache::IStageAtTimeInterval", 0, 1); StageAtTimeIntervalId(CARB_ABI* create)(StageWithHistoryId stageWithHistoryId, RationalTime beginTime, RationalTime endTime, bool includeEndTime); void(CARB_ABI* destroy)(StageAtTimeIntervalId stageAtTimeIntervalId); size_t(CARB_ABI* getTimesampleCount)(StageAtTimeIntervalId stageAtTimeIntervalId); void(CARB_ABI* getTimestamps)(RationalTime* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId); // Single attribute accessor size_t(CARB_ABI* getAttributeRd)(const void** out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId, PathC path, TokenC attrName); // Attribute SOA accessors PrimBucketListId(CARB_ABI* findPrims)(StageAtTimeIntervalId stageAtTimeIntervalId, const carb::flatcache::set<AttrNameAndType>& all, const carb::flatcache::set<AttrNameAndType>& any, 
const carb::flatcache::set<AttrNameAndType>& none); void(CARB_ABI* getAttributeArrayRd)(ConstSpanC* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getArrayAttributeArrayWithSizesRd)(ConstArrayPointersAndSizesC* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId, PrimBucketListId primBucketList, size_t primBucketListIndex, TokenC attrName); void(CARB_ABI* getPathArray)(ConstPathCSpan* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId, PrimBucketListId primBucketList, size_t primBucketListIndex); void(CARB_ABI* printBucketNames)(StageAtTimeIntervalId stageAtTimeIntervalId); void(CARB_ABI* exportUsd)(StageAtTimeIntervalId stageAtTimeIntervalId, UsdStageId usdStageId); RationalTime(CARB_ABI* getSimPeriod)(UsdStageId usdStageId); void(CARB_ABI* getAttributeCounts)(StageAtTimeIntervalId stageAtTimeIntervalId, PrimBucketListId primBucketList, size_t primBucketListIndex, size_t timesamples, size_t* outCounts); void(CARB_ABI* getAttributeNamesAndTypes)(StageAtTimeIntervalId stageAtTimeIntervalId, PrimBucketListId primBucketList, size_t primBucketListIndex, size_t timesamples, const size_t* attributeCounts, Token** outNames, Type** outTypes); size_t(CARB_ABI* getAttributeCountForTimesample)(StageAtTimeIntervalId stageAtTimeIntervalId, PrimBucketListId primBucketList, size_t primBucketListIndex, size_t timesampleIndex); void(CARB_ABI* getAttributeNamesAndTypesForTimesample)(StageAtTimeIntervalId stageAtTimeIntervalId, PrimBucketListId primBucketList, size_t primBucketListIndex, size_t timesampleIndex, size_t attributeCount, Token* outNames, Type* outTypes); void(CARB_ABI* getArrayAttributeWithSizeRd)(ConstSpanWithTypeC* out, size_t outCount, StageAtTimeIntervalId stageAtTimeIntervalId, carb::flatcache::PathC path, carb::flatcache::TokenC attrName); /** @brief Write a cache file to disk at a specified location * * @note Many 
parameters to this function are optional * @note This currently only writes the first time in the interval * @stageAtTimeIntervalId[in] The stage at time to be written to disk * @file[in The location the file is desired to be written to * @workingBuffer[in] [Optional] In order to avoid costly reallocations * the code will attempt to re-use the memory at the buffer * location if it is large enough. If the buffer isn't larg * enough the cost of allocation, and re-traversal may be paid * @workingBufferSize[in] [Optional] If workingBuffer is non null, then this desrcibes the length * of the buffer * @return The amount of data needed to serialize the cache, a return value of 0 indicates an error * */ uint64_t(CARB_ABI* writeCacheToDisk)( StageAtTimeIntervalId stageAtTimeIntervalId, const char* file, uint8_t* workingBuffer, uint64_t workingBufferSize); /** @brief Add a ref count to any data backed by the StageAtTimeIntercal * * @note The ref count will not enforce any behavior currently, but will * print a warning if backing data is deleted before all ref counts * are cleared * * @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting * * @return None * */ void(CARB_ABI* addRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId); /** @brief Remove a ref count from an existing timeInterval * * * @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting * * @return True if ref count was removed successfully, failure conditions may * include * (1) StageAtTimeInterval doesn't exist * (2) RefCount was already 0 * */ bool(CARB_ABI* removeRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId); /** @brief Query ref count for a stage at time * * @note A stage at time might be represented by multiple actual data sources * in that case we return the largest refcount of all the data sources * * @stageAtTimeIntervalId[in] The stage at time tracked for the ref counting * * @return number of reference counts * */ unsigned int(CARB_ABI* 
getRefCount)(StageAtTimeIntervalId stageAtTimeIntervalId);

    // Access GPU Array attribute
    void(CARB_ABI* getAttributeArrayRdGpu)(ConstSpanC* out,
                                           size_t outCount,
                                           StageAtTimeIntervalId stageAtTimeIntervalId,
                                           PrimBucketListId primBucketList,
                                           size_t primBucketListIndex,
                                           TokenC attrName);

    // Access GPU pointer attribute
    void(CARB_ABI* getAttributeRdGpu)(ConstSpanC* out,
                                      size_t outCount,
                                      StageAtTimeIntervalId stageAtTimeIntervalId,
                                      PathC path,
                                      TokenC attrName);

    // Get array size, useful for GPU attributes
    size_t(CARB_ABI* getArrayAttributeSize)(size_t* out,
                                            size_t outCount,
                                            StageAtTimeIntervalId stageAtTimeIntervalId,
                                            PathC path,
                                            TokenC attrName);

    /** @brief Returns which mirrors of the array are valid: CPU, GPU, etc.
     *
     * @stageAtTimeIntervalId[in] The stage to query validity from
     * @path[in] The prim path
     * @attrName[in] The attribute name
     *
     * @return ValidMirrors struct
     * */
    ValidMirrors(CARB_ABI* getAttributeValidBits)(StageAtTimeIntervalId stageAtTimeIntervalId,
                                                  const PathC& path,
                                                  const TokenC& attrName);

    /**
     * @brief Read the target(s) of a connection attribute into the caller-provided output span.
     * */
    void(CARB_ABI* getConnectionRd)(const void** out,
                                    size_t outCount,
                                    StageAtTimeIntervalId stageAtTimeIntervalId,
                                    PathC path,
                                    TokenC connectionName);
};

/** @brief C-ABI interface managing the lifetime of a StageWithHistory (a ring
 *         buffer of simulation frames) associated with a USD stage.
 * */
struct IStageWithHistory
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::IStageWithHistory", 0, 1);

    // Create a StageWithHistory for the given USD stage, with the given ring buffer
    // depth and simulation period.
    StageWithHistoryId(CARB_ABI* create)(UsdStageId usdStageId, size_t historyFrameCount, RationalTime simPeriod);

    // Look up the existing StageWithHistory for the given USD stage.
    StageWithHistoryId(CARB_ABI* get)(UsdStageId usdStageId);

    // Destroy the StageWithHistory associated with the given USD stage.
    void(CARB_ABI* destroy)(UsdStageId usdStageId);

    //
    // Create a snapshot of the stageWithHistory for the usdStageId, this currently just resets
    // the stage in progress, but it probably should be extended to copy the entire ringbuffer if we intend to
    // do anything other than reset to the start frame.
    //
    StageWithHistorySnapshot(CARB_ABI* saveSnapshot)(UsdStageId usdStageId);
    bool(CARB_ABI* deleteSnapshot)(UsdStageId usdStageId, size_t snapshotId);
    bool(CARB_ABI* restoreFromSnapshot)(UsdStageId usdStageId, size_t snapshotId);

    // Return the simulation period the StageWithHistory was created with.
    RationalTime(CARB_ABI* getSimPeriod)(UsdStageId usdStageId);

    // For multi-process replication. Stores the link between the stage id on the master process and the local stage id.
    void(CARB_ABI* setStageIdMapping)(UsdStageId usdStageIdMaster, UsdStageId usdStageIdLocal);

    // Create a new change-tracking listener id (used with IChangeTrackerConfig).
    ListenerId(CARB_ABI* createListener)();

    /** @brief Get the last frame that was written to the StageWithHistory
     *
     * @usdStageId[in] The identifier for the stage
     *
     * @return the time, and period of the last valid data written to the StageWithHistory
     * */
    RationalTime(CARB_ABI* getLatestFrame)(UsdStageId usdStageId);

    // As create(), with an additional flag selecting whether CUDA mirrors are enabled.
    StageWithHistoryId(CARB_ABI* create2)(UsdStageId usdStageId,
                                          size_t historyFrameCount,
                                          RationalTime simPeriod,
                                          bool withCuda);

    // Inverse of setStageIdMapping: return the local stage id for a master-process stage id.
    UsdStageId(CARB_ABI* getLocalStageId)(UsdStageId usdStageIdMaster);
};

/** @brief Process-wide defaults applied when a StageWithHistory is created implicitly. */
struct IStageWithHistoryDefaults
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::IStageWithHistoryDefaults", 0, 1);

    void(CARB_ABI* setStageHistoryFrameCount)(size_t historyFrameCount);
    void(CARB_ABI* setStageHistoryUpdatePeriod)(uint64_t periodNumerator, uint64_t periodDenominator);
};

/** @brief C-ABI interface for inspecting a PrimBucketList (the result of a bucket query). */
struct IPrimBucketList
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::IPrimBucketList", 0, 2);

    void(CARB_ABI* destroy)(PrimBucketListId primBucketListId);
    size_t(CARB_ABI* getBucketCount)(PrimBucketListId primBucketListId);
    // Print the bucket list (debugging aid).
    void(CARB_ABI* print)(PrimBucketListId primBucketListId);
    // Per-bucket change information for change lists.
    BucketChangesC(CARB_ABI* getChanges)(PrimBucketListId changeListId, size_t index);
    AddedPrimIndicesC(CARB_ABI* getAddedPrims)(PrimBucketListId changeListId, size_t index);
};

/** @brief Configuration of per-listener change tracking on a StageInProgress. */
struct IChangeTrackerConfig
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::IChangeTrackerConfig", 0, 3);

    void(CARB_ABI* pause)(StageInProgressId stageInProgressId, ListenerId listenerId);
    void(CARB_ABI* resume)(StageInProgressId stageInProgressId, ListenerId listenerId);
    bool(CARB_ABI* isChangeTrackingPaused)(StageInProgressId stageInProgressId, ListenerId listenerId);
    // Enable/disable change tracking for a single attribute name, per listener.
    void(CARB_ABI* attributeEnable)(StageInProgressId stageInProgressId, TokenC attrName, ListenerId listenerId);
    void(CARB_ABI* attributeDisable)(StageInProgressId stageInProgressId, TokenC attrName, ListenerId listenerId);
    bool(CARB_ABI* isListenerAttached)(StageInProgressId stageInProgressId, ListenerId listenerId);
    void(CARB_ABI* detachListener)(StageInProgressId stageInProgressId, ListenerId listenerId);
    size_t(CARB_ABI* getListenerCount)(StageInProgressId stageInProgressId);
    void(CARB_ABI* enablePrimCreateTracking)(StageInProgressId stageInProgressId, ListenerId listenerId);
};

/** @brief The Serializer interface provides the C-ABI compatible functions for
 *         working with all serialization of SWH and workflows. This covers
 *         (1) In-memory serialization/deserialization
 *         (2) Serialization to Disk and From
 *         (3) Functions to support replication based on serialization
 *  Because of the nature of SWH there are multiple places one might want to
 *  actually serialize the cache from, we provide some convenience functions
 *  that wrap this up, but also the direct functionality to serialize a
 *  PathToAttributesMap directly to/from a buffer for convenience.
 * */
struct ISerializer
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::ISerializer", 0, 2);

    //
    // deprecated for more appropriately named serializeRingBuffer
    //
    uint64_t(CARB_ABI* serializeStage)(StageWithHistoryId stageWithHistoryId, size_t slot, uint8_t* dest, size_t destSize);

    //
    // deprecated for more appropriately named deserializeIntoRingBuffer
    //
    bool (CARB_ABI* deserializeStage)(StageWithHistoryId stageWithHistoryId,
                                      size_t slot,
                                      const uint8_t* input,
                                      const size_t inputSize,
                                      size_t simFrameNumber,
                                      carb::flatcache::RationalTime simFrameTime);

    /** @brief Attempt to serialize the stage into the provided buffer. This function
     * is intended to be used when you want to serialize all the data within a
     * ring buffer entry, however this is often more data than needs to be sent.
     *
     * @stage[in] The StageWithHistory with the ring buffer to be serialized
     * @slot[in] The slot from the ring buffer to send
     * @dest[in/out] Pointer to buffer to be written to, will start writing to head
     *               of pointer. dest will be left pointing to the point after the last write
     * @destSize Size of buffer that was allocated for the data (in bytes)
     *
     * @return Number of bytes written, success is determined by (return <= @destSize)
     *
     * @invariant It is safe to write to any memory within [dest, dest+size] for the
     *            duration of the function call.
     *
     * @note If the cache will not fit into the size of memory allocated in
     *       @dest then it will stop writing, but continue to run the serialize
     *       algorithm to calculate the actual amount of data that needs to be
     *       written
     * */
    uint64_t(CARB_ABI* serializeRingBuffer)(StageWithHistoryId stageWithHistoryId, size_t slot, uint8_t* dest, size_t destSize);

    /** @brief Given a buffer that has the serialized version of a cache written
     * using the serialize function, this function will override all the data
     * in the ringbuffer at the requested slot with the data encoded in the
     * buffer. This function will only succeed if the StageWithHistory that
     * is passed in was created from the same UsdStage (opened at the same root layer)
     * that was used to create the original serialized cache.
     *
     * @stageWithHistoryId[in] The stage to write the data to
     * @slot[in] The index in the ring buffer to pull to
     * @input[in] Pointer to buffer of data containing serialized cache
     * @inputSize[in] Size of data in the buffer
     * @simFrameNumber[in] The frame of the simulation to set the ring buffer entry to
     * @simFrameTime[in] The simFrame time to set the ring buffer to
     *
     * @return True if buffer was successfully de-serialized
     *
     * @TODO: should we care that it came from the same version of the USD file?
     */
    bool (CARB_ABI* deserializeIntoRingBuffer)(StageWithHistoryId stageWithHistoryId,
                                               size_t slot,
                                               const uint8_t* input,
                                               const size_t inputSize,
                                               size_t simFrameNumber,
                                               carb::flatcache::RationalTime simFrameTime);

    /** @brief Replicate the ring buffers from the master to the workers when running
     * multiple processes. Data is serialized into buffers allocated and broadcast
     * by Realm, followed by deserialization into the remote ring buffers. This
     * function is synchronous, i.e., the remote FlatCaches have finished updating
     * when this function returns.
     */
    void (CARB_ABI* replicateRingBuffers)();
};

struct Platform;

/** @brief C-ABI access to the per-platform-id Platform singletons. */
struct IPlatform
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::IPlatform", 0, 1);

    const Platform& (CARB_ABI* get)(const PlatformId& platformId);
    Platform& (CARB_ABI* getMutable)(const PlatformId& platformId);
    void (CARB_ABI* reset)(const PlatformId& platformId);
    void (CARB_ABI* resetAll)();
};

} // namespace flatcache
} // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/GetArrayGPU.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "PathToAttributesMap.h" #include <carb/profiler/Profile.h> namespace carb { namespace flatcache { const uint64_t kProfilerMask = 1; // If this is an array-of-arrays: // array.cpuData - array of CPU pointers on CPU // gpuPointerArray->cpuData() - array of GPU pointers on CPU inline void PathToAttributesMap::enableGpuRead(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* gpuPointerArray) { CARB_PROFILE_ZONE(kProfilerMask, "enableGpuRead"); using omni::gpucompute::MemcpyKind; log("begin enableGpuRead\n"); bool& cpuValid = array.cpuValid; bool& gpuValid = array.gpuValid; bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda; uint8_t* cpuArray = array.cpuData(); uint8_t*& gpuArray = array.gpuArray; if (gpuValid) { // Nothing to do } else if (cpuValid) { std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex); const TypeC type = array.type; const Typeinfo &typeInfo = getTypeInfo(type); if (typeInfo.isArray) { const size_t elemCount = array.count; uint8_t** cpuPointers = reinterpret_cast<uint8_t**>(cpuArray); uint8_t** gpuPointers = reinterpret_cast<uint8_t**>(gpuPointerArray->cpuData()); for (size_t elem = 0; elem != elemCount; elem++) { const size_t desiredCapacity = elemToArraySize[elem]; const size_t cpuCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem]; size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem]; if(gpuCapacity != 
desiredCapacity) { destructiveResizeIfNecessaryGPU(*gpuPointerArray, elem, gpuCapacity, desiredCapacity, typeInfo.arrayElemSize, platform.gpuCuda, platform.gpuCudaCtx); } const size_t copyByteCount = std::min(desiredCapacity, cpuCapacity) * typeInfo.arrayElemSize; if (copyByteCount > 0) { void* cpuPointer = cpuPointers[elem]; void* gpuPointer = gpuPointers[elem]; CARB_ASSERT(cpuPointer); CARB_ASSERT(gpuPointer); platform.gpuCuda->memcpyAsync( *platform.gpuCudaCtx, gpuPointer, cpuPointer, copyByteCount, MemcpyKind::hostToDevice); } } gpuPointerArray->cpuValid = true; } else { // Copy the outer array from CPU to GPU size_t byteCount = array.size(); allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuCuda, platform.gpuCudaCtx); log("array values: to GPU\n"); uint8_t* cpuArray = array.cpuData(); carb::profiler::ZoneId zoneId = CARB_PROFILE_BEGIN(kProfilerMask, "outer array values"); platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, gpuArray, cpuArray, byteCount, MemcpyKind::hostToDevice); CARB_PROFILE_END(kProfilerMask, zoneId); } // New state cpuValid = true; gpuValid = true; gpuAllocedWithCuda = true; } } inline void PathToAttributesMap::enableGpuWrite(PathToAttributesMap::MirroredArray& array, const size_t* elemToArraySize, PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity, PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity, PathToAttributesMap::MirroredArray* arrayGpuDataArray) { CARB_PROFILE_ZONE(kProfilerMask, "enableGpuWrite"); using omni::gpucompute::MemcpyKind; bool& usdValid = array.usdValid; bool& cpuValid = array.cpuValid; bool& gpuValid = array.gpuValid; bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda; const TypeC type = array.type; const Typeinfo &typeInfo = getTypeInfo(type); std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex); if (!typeInfo.isArray && !gpuValid) { size_t byteCount = array.size(); allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuCuda, 
platform.gpuCudaCtx); } else if (typeInfo.isArray) { // Array-valued elements are lazily allocated, meaning they are only // resized when write access is requested. // Write access has been requested, so resize if necessary size_t elemCount = array.count; for (size_t elem = 0; elem != elemCount; elem++) { size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem]; size_t desiredElemCount = elemToArraySize[elem]; resizeIfNecessaryGPU( *arrayGpuDataArray, elem, gpuCapacity, desiredElemCount, typeInfo.arrayElemSize, platform.gpuCuda, platform.gpuCudaCtx); } // Upload of allocated pointers to GPU happens outside this function } // New state usdValid = false; cpuValid = false; gpuValid = true; gpuAllocedWithCuda = true; if (elemToArrayCpuCapacity) elemToArrayCpuCapacity->usdValid = false; if (elemToArrayGpuCapacity) elemToArrayGpuCapacity->usdValid = false; if (arrayGpuDataArray) arrayGpuDataArray->usdValid = false; } inline ConstSpanC PathToAttributesMap::getArraySpanRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArraySpanRdGpuC", apiLogEnabled, attrName); BucketId bucketId = findBucketId(bucket); if (bucketId == kInvalidBucketId) return { nullptr, 0, 0 }; // We don't set dirty indices here because this method gives read-only access return getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix).array; } inline const void* PathToAttributesMap::getArrayRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArrayRdGpuC", apiLogEnabled, attrName); BucketId bucketId = findBucketId(bucket); if (bucketId == kInvalidBucketId) return nullptr; // We don't set dirty indices here because this method gives read-only access return getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix).array.ptr; } inline SpanC PathToAttributesMap::getArrayGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArrayGpuC", apiLogEnabled, attrName); 
ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix); setArrayDirty(arrayAndDirtyIndices); return arrayAndDirtyIndices.array; } inline ConstSpanC PathToAttributesMap::getArrayRdGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArrayRdGpuC", apiLogEnabled, attrName); const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaReadConfig(), suffix); // We don't set dirty indices here because this method gives read-only access return arrayAndDirtyIndices.array; } inline SpanC PathToAttributesMap::getArrayWrGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArrayWrC", apiLogEnabled, attrName); ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix); setArrayDirty(arrayAndDirtyIndices); return arrayAndDirtyIndices.array; } inline SpanC PathToAttributesMap::getArraySpanWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArraySpanWrGpuC", apiLogEnabled, attrName); BucketId bucketId = findBucketId(bucket); if (bucketId == kInvalidBucketId) return { nullptr, 0, 0 }; ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix); setArrayDirty(array); return array.array; } inline void* PathToAttributesMap::getArrayWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArrayWrGpuC", apiLogEnabled, attrName); // Get write-only GPU access BucketId bucketId = findBucketId(bucket); if (bucketId == kInvalidBucketId) return nullptr; ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaWriteConfig(), suffix); setArrayDirty(array); return array.array.ptr; } inline SpanC PathToAttributesMap::getArraySpanGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArraySpanGpuC", apiLogEnabled, attrName); BucketId bucketId = findBucketId(bucket); if (bucketId 
== kInvalidBucketId) return { nullptr, 0, 0 }; ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix); setArrayDirty(array); return array.array; } inline void* PathToAttributesMap::getArrayGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArrayGpuC", apiLogEnabled, attrName); BucketId bucketId = findBucketId(bucket); if (bucketId == kInvalidBucketId) return nullptr; ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig(), suffix); setArrayDirty(array); return array.array.ptr; } inline SpanC PathToAttributesMap::getAttributeGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix) { APILOGGER("getAttributeGpuC", apiLogEnabled, path, attrName); bool present; // Whether this path has a bucket BucketId bucketId; // Pointer to the bucket if it does size_t element; // Index corresponding to path in this bucket's arrays std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path); if (!present) return { nullptr, 0, 0 }; ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig().withPtrToPtrKind(ptrToPtrKind), suffix); setArrayElementDirty(array, element); return getArrayElementPtr(array.array, element); } inline ConstSpanC PathToAttributesMap::getAttributeRdGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix) { APILOGGER("getAttributeRdGpuC", apiLogEnabled, path, attrName); bool present; BucketId bucketId; size_t element; std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path); if (!present) return { nullptr, 0, 0 }; ConstSpanC array = getArraySpanC(bucketId, attrName, CudaReadConfig().withPtrToPtrKind(ptrToPtrKind), suffix).array; // We don't set dirty indices here because this method gives read-only access return getArrayElementPtr(array, element); } inline SpanC PathToAttributesMap::getAttributeWrGpuC(const PathC& path, const TokenC& 
attrName, PtrToPtrKind ptrToPtrKind, NameSuffix suffix) { APILOGGER("getAttributeWrGpuC", apiLogEnabled, path, attrName); // Writing an element is a RMW on the whole array, so use getArrayGpu instead of getArrayGpuWr bool present; BucketId bucketId; size_t element; std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path); if (!present) return { nullptr, 0, 0 }; ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, CudaReadWriteConfig().withPtrToPtrKind(ptrToPtrKind), suffix); setArrayElementDirty(array, element); return getArrayElementPtr(array.array, element); } // Typed accessors template <typename T> inline const T* PathToAttributesMap::getArrayRdGpu(const Bucket& bucket, const TokenC& attrName) { APILOGGER("getArrayRdGpu", apiLogEnabled, attrName); // TODO: check that T is the correct type return reinterpret_cast<const T*>(getArrayRdGpuC(bucket, attrName)); } template <typename T> inline T* PathToAttributesMap::getArrayWrGpu(const Bucket& bucket, const TokenC& attrName) { APILOGGER("getArrayWrGpu", apiLogEnabled, attrName); // TODO: check that T is the correct type return reinterpret_cast<T*>(getArrayWrGpuC(bucket, attrName)); } template <typename T> inline T* PathToAttributesMap::getArrayGpu(const Bucket& bucket, const TokenC& attrName) { APILOGGER("getArrayGpu", apiLogEnabled, attrName); // TODO: check that T is the correct type return reinterpret_cast<T*>(getArrayGpuC(bucket, attrName)); } template <typename T> inline T* PathToAttributesMap::getAttributeGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind) { APILOGGER("getAttributeGpu", apiLogEnabled, path, attrName); // TODO: check that T is the correct type return reinterpret_cast<T*>(getAttributeGpuC(path, attrName, ptrToPtrKind).ptr); } template <typename T> inline const T* PathToAttributesMap::getAttributeRdGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind) { APILOGGER("getAttributeRdGpu", apiLogEnabled, path, attrName); // TODO: 
check that T is the correct type return reinterpret_cast<const T*>(getAttributeRdGpuC(path, attrName, ptrToPtrKind).ptr); } template <typename T> inline T* PathToAttributesMap::getAttributeWrGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind) { APILOGGER("getAttributeWrGpu", apiLogEnabled, path, attrName); // TODO: check that T is the correct type return reinterpret_cast<T*>(getAttributeWrGpuC(path, attrName, ptrToPtrKind).ptr); } } }
omniverse-code/kit/fabric/include/carb/flatcache/AttrNameAndType.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/flatcache/IToken.h>
#include <carb/flatcache/IPath.h>
#include <carb/flatcache/Type.h>

namespace carb
{
namespace flatcache
{

// Basic types
// Types and methods ending in C are intended to be used with C-ABI interfaces.

// PathToAttributesMap allows metadata to be attached to attributes.
// The metadata that exist currently are for (flatcache) internal use only.
// Abstractly it holds an array for each attribute, where element 0
// (NameSuffix::none) is the value itself, and other elements are the metadata.
// It is called NameSuffix because conceptually each metadatum adds a
// new attribute with a name suffix specifying the type of metadata.
// For example, suppose you have an attribute "points" that has an attached
// elemCount. Conceptually you have two attributes "points" and
// "points_elemCount".
enum class NameSuffix
{
    // Index NameSuffix::value is the index of the data itself
    none = 0,
    // The following metadata is present on USD attributes that connect to others
    connection // The target (prim, attribute) of the connection
};

// The target of a connection attribute: a (prim path, attribute name) pair.
struct Connection
{
    PathC path;
    TokenC attrName;
};

// AttrNameAndType specifies the name and type of an attribute. When the user
// searches for buckets of prims they use this type to specify which attributes
// the prims must have. Also the user can query the name and type of an
// attribute at a given path, and the output has this type.
//
// This version of the struct contains the type in flatcache format only.
// The original, AttrNameAndType, additionally contains the type in USD format,
// but that version will be deprecated.
struct AttrNameAndType
{
    Type type;        // Attribute type in flatcache format
    Token name;       // Attribute name
    NameSuffix suffix; // Which slot (value or metadata) this refers to

    AttrNameAndType() = default;

    AttrNameAndType(Type type, Token name, NameSuffix suffix = NameSuffix::none)
        : type(type), name(name), suffix(suffix)
    {
    }

    // Note that in the name comparisons below TokenC masks off USD's lifetime bit.
    // For example, tokens created from the same string are considered equal even
    // if one was created with finite lifetime and the other infinite lifetime.
    //
    // Lexicographic order on (type, name, suffix), using the C-ABI wrappers'
    // comparison semantics for type and name.
    bool operator<(const AttrNameAndType& rhs) const
    {
        if (TypeC(type) < TypeC(rhs.type))
            return true;
        if (TypeC(rhs.type) < TypeC(type))
            return false;
        if (TokenC(name) < TokenC(rhs.name))
            return true;
        if (TokenC(rhs.name) < TokenC(name))
            return false;
        return suffix < rhs.suffix;
    }

    bool operator==(const AttrNameAndType& other) const
    {
        return type == other.type && name == other.name && suffix == other.suffix;
    }
};

static_assert(std::is_standard_layout<AttrNameAndType>::value,
              "AttrNameAndType must be standard layout as it is used in C-ABI interfaces");

// NOTE: This type alias provides source level compatibility. Usage of the original AttrNameAndType structure has
// been replaced with what was previously called AttrNameAndType_v2 and the _v2 suffix dropped. This alias allows code
// which still refers to AttrNameAndType_v2 to compile.
using AttrNameAndType_v2 = AttrNameAndType;
}
}

namespace std
{
// Hash support so AttrNameAndType can key unordered containers.
template <>
struct hash<carb::flatcache::AttrNameAndType>
{
    // Use the same hash_combine as boost
    template <class T>
    static inline void hash_combine(std::size_t& seed, const T& v)
    {
        std::hash<T> hasher;
        seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    }

    std::size_t operator()(const carb::flatcache::AttrNameAndType& key) const
    {
        size_t hash = std::hash<carb::flatcache::Type>{}(key.type);
        hash_combine(hash, std::hash<carb::flatcache::Token>{}(key.name));
        hash_combine(hash, uint32_t(key.suffix));
        return hash;
    }
};
}
omniverse-code/kit/fabric/include/carb/flatcache/Platform.h
#pragma once #include <carb/flatcache/Allocator.h> namespace omni { namespace gpucompute { struct GpuCompute; struct Context; } // namespace gpucompute } // namespace omni namespace carb { namespace flatcache { struct Platform { Allocator allocator; omni::gpucompute::GpuCompute* gpuCuda = nullptr; omni::gpucompute::Context* gpuCudaCtx = nullptr; // The gpuD3dVk interface is used only if you access GPU arrays using D3D or Vulkan. // If you're only using CPU or CUDA GPU arrays then you don't set it. omni::gpucompute::GpuCompute* gpuD3dVk = nullptr; omni::gpucompute::Context* gpuD3dVkCtx = nullptr; Platform() = default; Platform(const Platform& other) = delete; Platform& operator=(const Platform& other) = delete; Platform(Platform&& other) = default; Platform& operator=(Platform&& other) = default; inline void reset() { gpuD3dVk = nullptr; gpuD3dVkCtx = nullptr; gpuCuda = nullptr; gpuCudaCtx = nullptr; allocator.~Allocator(); new (&allocator) Allocator(); } // mirror of IPlatform functions static void get(const PlatformId& id); static void getMutable(const PlatformId& id); static void reset(const PlatformId& id); static void resetAll(); }; } // namespace flatcache } // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/Type.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// Fix: this header uses uint8_t/uint32_t, std::hash, std::size_t and
// std::is_standard_layout but previously relied on transitive includes for
// them; include what we use so the header is self-contained.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <sstream>
#include <string>
#include <type_traits>

namespace carb
{
namespace flatcache
{

// TypeC are integer keys that identify types, like float3, int[] etc.
// There isn't a USD type that can be cast to TypeC,
// please use carb::flatcache::usdTypeToTypeC().
struct TypeC
{
    uint32_t type; // Packed (role, arrayDepth, componentCount, baseType), little-endian byte order

    constexpr bool operator<(const TypeC& other) const
    {
        return type < other.type;
    }
    constexpr bool operator==(const TypeC& other) const
    {
        return type == other.type;
    }
    constexpr bool operator!=(const TypeC& other) const
    {
        return type != other.type;
    }
};
static_assert(std::is_standard_layout<TypeC>::value, "Struct must be standard layout as it is used in C-ABI interfaces");

static constexpr TypeC kUnknownType{ 0 };

// Scalar element type of an attribute.
enum class BaseDataType : uint8_t
{
    eUnknown = 0,
    eBool,
    eUChar,
    eInt,
    eUInt,
    eInt64,
    eUInt64,
    eHalf,
    eFloat,
    eDouble,
    eToken,
    // RELATIONSHIP is stored as a 64-bit integer internally, but shouldn't be
    // treated as an integer type by nodes.
    eRelationship,
    // For internal use only
    eAsset,
    ePrim,
    eConnection,
    // eTags are attributes that have a name but no type or value
    // They are used for named tags, including USD applied schemas
    eTag
};

// Stream the short name of a base data type (e.g. "float", "rel").
inline std::ostream& operator<<(std::ostream& s, const BaseDataType& type)
{
    static const std::string names[] = { "unknown", "bool",  "uchar", "int",  "uint", "int64",
                                         "uint64",  "half",  "float", "double", "token", "rel",
                                         "asset",   "prim",  "connection", "tag" };
    if (type <= BaseDataType::eTag)
    {
        return s << names[uint8_t(type)];
    }
    return s;
}

// These correspond with USD attribute "roles", with the exception of eString.
// For example that a vector3f or vector3d (VECTOR) would be transformed
// differently from a point3f or point3d (POSITION).
enum class AttributeRole : uint8_t
{
    eNone = 0,
    eVector,
    eNormal,
    ePosition,
    eColor,
    eTexCoord,
    eQuaternion,
    eTransform,
    eFrame,
    eTimeCode,
    // eText is not a USD role. If a uchar[] attribute has role eText then
    // the corresponding USD attribute will have type "string", and be human
    // readable in USDA. If it doesn't, then it will have type "uchar[]" in USD
    // and appear as an array of numbers in USDA.
    eText,
    // eAppliedSchema is not a USD role, eTags with this role are USD applied schema.
    eAppliedSchema,
    // ePrimTypeName is not a USD role, eTags with this role are USD prim types.
    ePrimTypeName,
    // eExecution is not a USD role, uint attributes with this role are used for control flow in Action Graphs.
    eExecution,
    eMatrix,
    // eObjectId is not a USD role, uint64 attributes with this role are used for Python object identification.
    eObjectId,
    // eBundle is not a USD role, ePrim and eRelationship attributes with this role identify OmniGraph bundles
    eBundle,
    // ePath is not a USD role, it refers to strings that are reinterpreted as SdfPaths. The attribute type must
    // be a uchar[] with a USD type "string".
    ePath,
    // eInstancedAttribute is used as a role on tag types in place of attribute types on instanced prims.
    eInstancedAttribute,
    // eAncestorPrimTypeName is not a USD role, eTags with this role are ancestor types of a USD prim type.
    eAncestorPrimTypeName,
    // Special marker for roles that are not yet determined
    eUnknown,
};

// Stream the OGN-style role name (e.g. "point", "quat").
inline std::ostream& operator<<(std::ostream& s, const AttributeRole& type)
{
    static const std::string ognRoleNames[] = { "none",      "vector",        "normal",   "point",
                                                "color",     "texcoord",      "quat",     "transform",
                                                "frame",     "timecode",      "text",     "appliedSchema",
                                                "primTypeName", "execution",  "matrix",   "objectId",
                                                "bundle",    "path",          "instancedAttribute",
                                                "ancestorPrimTypeName", "unknown" };
    if (type <= AttributeRole::eUnknown)
    {
        return s << ognRoleNames[uint8_t(type)];
    }
    return s;
}

// Role names as used by USD, which are slightly different from the internal names used
inline std::string usdRoleName(const AttributeRole& type)
{
    static const std::string usdRoleNames[] = { "none",      "vector",        "normal",   "position",
                                                "color",     "texCoord",      "quaternion", "transform",
                                                "frame",     "timecode",      "text",     "appliedSchema",
                                                "primTypeName", "execution",  "matrix",   "objectId",
                                                "bundle",    "path",          "instancedAttribute",
                                                "ancestorPrimTypeName", "unknown" };
    if (type <= AttributeRole::eUnknown)
    {
        return usdRoleNames[uint8_t(type)];
    }
    return usdRoleNames[uint8_t(AttributeRole::eUnknown)];
}

// Full description of an attribute's type: base scalar type, component count,
// array nesting depth and semantic role. Packs to/from TypeC (4 bytes).
struct Type
{
    BaseDataType baseType; // 1 byte

    // 1 for raw base types; 2 for vector2f, int2, etc; 3 for point3d, normal3f, etc;
    // 4 for quatf, color4d, float4, matrix2f etc; 9 for matrix3f, etc; 16 for matrix4d, etc.
    uint8_t componentCount; // 1 byte

    // 0 for a single value
    // 1 for an array
    // 2 for an array of arrays (not yet supported)
    uint8_t arrayDepth; // 1 byte

    AttributeRole role; // 1 byte

    constexpr Type(BaseDataType baseType,
                   uint8_t componentCount = 1,
                   uint8_t arrayDepth = 0,
                   AttributeRole role = AttributeRole::eNone)
        : baseType(baseType), componentCount(componentCount), arrayDepth(arrayDepth), role(role)
    {
    }

    constexpr Type() : Type(BaseDataType::eUnknown)
    {
    }

    // Matches little endian interpretation of the four bytes
    constexpr explicit Type(const TypeC& t)
        : baseType(BaseDataType(t.type & 0xff)),
          componentCount((t.type >> 8) & 0xff),
          arrayDepth((t.type >> 16) & 0xff),
          role(AttributeRole((t.type >> 24) & 0xff))
    {
    }

    constexpr explicit operator TypeC() const
    {
        uint32_t type = uint8_t(role) << 24 | arrayDepth << 16 | componentCount << 8 | uint8_t(baseType);
        return TypeC{ type };
    }

    constexpr bool operator==(const Type& rhs) const
    {
        return compatibleRawData(rhs) && role == rhs.role;
    }

    constexpr bool operator!=(const Type& rhs) const
    {
        return !((*this) == rhs);
    }

    constexpr bool operator<(const Type& rhs) const
    {
        return TypeC(*this) < TypeC(rhs);
    }

    /**
     * Role-insensitive equality check
     */
    constexpr bool compatibleRawData(const Type& otherType) const
    {
        return baseType == otherType.baseType && componentCount == otherType.componentCount &&
               arrayDepth == otherType.arrayDepth;
    }

    /**
     * Check to see if this is one of the matrix types
     */
    constexpr bool isMatrixType() const
    {
        return (role == AttributeRole::eMatrix) || (role == AttributeRole::eFrame) ||
               (role == AttributeRole::eTransform);
    }

    /**
     * Returns the dimensions of the type, componentCount for most types and square root of that for matrix types
     */
    constexpr uint8_t dimension() const
    {
        if (isMatrixType())
        {
            return componentCount == 4 ? 2 : (componentCount == 9 ? 3 : (componentCount == 16 ? 4 : componentCount));
        }
        return componentCount;
    }

    // USD-style type name, e.g. "float3[] (color)".
    std::string getTypeName() const
    {
        std::ostringstream typeName;
        typeName << baseType;
        if (componentCount > 1)
            typeName << uint32_t(componentCount);
        if (arrayDepth == 1)
            typeName << "[]";
        else if (arrayDepth == 2)
            typeName << "[][]";
        // Some roles are hidden from USD
        if ((role != AttributeRole::eNone) && (role != AttributeRole::eObjectId) &&
            (role != AttributeRole::eBundle) && (role != AttributeRole::ePath))
        {
            typeName << " (" << usdRoleName(role) << ")";
        }
        return typeName.str();
    }

    // ======================================================================
    /**
     * OGN formats the type names slightly differently.
     *  - the tuples are internal "float[3]" instead of "float3"
     *  - the roles replace the actual name "colord[3]" instead of "double3 (color)"
     */
    std::string getOgnTypeName() const
    {
        std::ostringstream typeName;
        if (role == AttributeRole::eText)
        {
            typeName << "string";
            return typeName.str();
        }
        if (role == AttributeRole::ePath)
        {
            typeName << "path";
            return typeName.str();
        }
        if (role != AttributeRole::eNone)
        {
            typeName << role;
            // For roles with explicit types, add that to the role name
            if ((role != AttributeRole::eTimeCode) && (role != AttributeRole::eTransform) &&
                (role != AttributeRole::eFrame) && (role != AttributeRole::eObjectId) &&
                (role != AttributeRole::eBundle) && (role != AttributeRole::eExecution))
            {
                switch (baseType)
                {
                case BaseDataType::eHalf:
                    typeName << "h";
                    break;
                case BaseDataType::eFloat:
                    typeName << "f";
                    break;
                case BaseDataType::eDouble:
                    typeName << "d";
                    break;
                default:
                    typeName << baseType;
                    break;
                }
            }
        }
        else
        {
            typeName << baseType;
        }
        if (componentCount > 1)
        {
            typeName << "[" << uint32_t(dimension()) << "]";
        }
        if (arrayDepth == 1)
            typeName << "[]";
        else if (arrayDepth == 2)
            typeName << "[][]";
        return typeName.str();
    }
};

inline std::ostream& operator<<(std::ostream& s, const Type& type)
{
    s << type.getTypeName();
    return s;
}
}
}

namespace std
{
// Hash support: a Type/TypeC hashes to its packed 32-bit encoding.
template <>
struct hash<carb::flatcache::Type>
{
    std::size_t operator()(const carb::flatcache::Type& key) const
    {
        return carb::flatcache::TypeC(key).type;
    }
};

template <>
struct hash<carb::flatcache::TypeC>
{
    std::size_t operator()(const carb::flatcache::TypeC& key) const
    {
        return key.type;
    }
};
}
omniverse-code/kit/fabric/include/carb/flatcache/GetArrayD3dGpu.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/Defines.h>
#include <carb/graphics/Graphics.h>

#include <omni/gpucompute/GpuCompute.h>

using namespace carb::graphics;
using std::unique_ptr;

namespace carb
{
namespace flatcache
{

// Make the D3D/Vulkan GPU mirror of `array` valid for reading, uploading from
// the CPU mirror if necessary.
//
// If this is an array-of-arrays:
// array.cpuData - array of CPU pointers on CPU
// gpuPointerArray->cpuData() - array of GPU pointers on CPU
//
// Parameters:
//   array                  - the mirrored attribute array whose GPU copy must become valid
//   elemToArraySize        - per-element logical sizes (element counts), used only for array-of-arrays
//   elemToArrayCpuCapacity - NOTE(review): unused in this function; kept for signature symmetry with callers
//   elemToArrayGpuCapacity - per-element GPU capacities, resized destructively as needed
//   gpuPointerArray        - holds (on CPU) the per-element GPU pointers for array-of-arrays
//
// Postcondition (when an upload happened): cpuValid and gpuValid are both true,
// and the GPU memory is marked as not CUDA-allocated (D3D/Vk path).
inline void PathToAttributesMap::enableD3dGpuRead(MirroredArray& array,
                                                  const size_t* elemToArraySize,
                                                  MirroredArray* elemToArrayCpuCapacity,
                                                  MirroredArray* elemToArrayGpuCapacity,
                                                  MirroredArray* gpuPointerArray)
{
    using omni::gpucompute::MemcpyKind;
    bool& cpuValid = array.cpuValid;
    bool& gpuValid = array.gpuValid;
    bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
    uint8_t* cpuArray = array.cpuData();
    uint8_t*& gpuArray = array.gpuArray;
    if (gpuValid)
    {
        // Nothing to do
    }
    else if (!gpuValid && cpuValid)
    {
        const TypeC type = array.type;
        const Typeinfo &typeInfo = getTypeInfo(type);
        // Serialize host<->device transfers for this attribute
        std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
        // If each element is an array, then they could be of different sizes
        // So alloc and memcpy each one
        if (typeInfo.isArray)
        {
            size_t elemCount = array.count;
            uint8_t** elemToArrayCpuData = (uint8_t**)cpuArray;
            uint8_t** elemToArrayGpuData = (uint8_t**)gpuPointerArray->cpuData();
            for (size_t elem = 0; elem != elemCount; elem++)
            {
                // Make sure that the dest (GPU) buffer is large enough
                const uint8_t* const& cpuData = elemToArrayCpuData[elem]; // src
                size_t& destCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
                size_t desiredElemCount = elemToArraySize[elem];
                // Destructive resize is fine: the GPU copy is about to be overwritten anyway
                destructiveResizeIfNecessaryGPU(*gpuPointerArray, elem, destCapacity, desiredElemCount,
                                                typeInfo.arrayElemSize, platform.gpuD3dVk, platform.gpuD3dVkCtx);
                // Copy from CPU to GPU
                if (desiredElemCount != 0 && cpuData)
                {
                    uint8_t*& gpuData = elemToArrayGpuData[elem]; // dest
                    size_t copyByteCount = desiredElemCount * typeInfo.arrayElemSize;
                    platform.gpuD3dVk->memcpy(*platform.gpuD3dVkCtx, gpuData, cpuData, copyByteCount,
                                              MemcpyKind::hostToDevice);
                }
                else if (desiredElemCount != 0 && !cpuData)
                {
                    // NOTE(review): inconsistent state (cpuValid set but no CPU allocation);
                    // reported via printf rather than the carb logging macros
                    printf("Warning: GPU read access requested, CPU is valid but not allocated\n");
                }
            }
            // We don't need to copy the outer array to GPU here.
            // In D3dVk, the outer array is currently a CPU array of descriptors that we copy to
            // a kernel calls descriptor set at dispatch time
        }
        else
        {
            // Copy the outer array from CPU to GPU
            size_t byteCount = array.size();
            allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuD3dVk, platform.gpuD3dVkCtx);
            // NOTE(review): this local shadows the outer `cpuArray`; both point at array.cpuData()
            uint8_t* cpuArray = array.cpuData();
            platform.gpuD3dVk->memcpy(*platform.gpuD3dVkCtx, gpuArray, cpuArray, byteCount,
                                      MemcpyKind::hostToDevice);
        }
        // New state
        cpuValid = true;
        gpuValid = true;
        gpuAllocedWithCuda = false;
    }
}

// Make the D3D/Vulkan GPU mirror of `array` writable, allocating/resizing GPU
// storage as needed. Unlike enableD3dGpuRead, no data is copied: after this
// call the GPU copy is the only valid mirror (usdValid/cpuValid are cleared).
//
// Parameters:
//   array                  - the mirrored attribute array to make GPU-writable
//   elemToArraySize        - per-element desired element counts (array-of-arrays only)
//   elemToArrayCpuCapacity - NOTE(review): unused in this function
//   elemToArrayGpuCapacity - per-element GPU capacities, grown (non-destructively) as needed
//   arrayGpuDataArray      - holds the per-element GPU pointers (array-of-arrays only)
inline void PathToAttributesMap::enableD3dGpuWrite(PathToAttributesMap::MirroredArray& array,
                                                   const size_t* elemToArraySize,
                                                   PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
                                                   PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
                                                   PathToAttributesMap::MirroredArray* arrayGpuDataArray)
{
    log("begin enableGpuWrite\n");
    bool& usdValid = array.usdValid;
    bool& cpuValid = array.cpuValid;
    bool& gpuValid = array.gpuValid;
    bool& gpuAllocedWithCuda = array.gpuAllocedWithCuda;
    const TypeC type = array.type;
    const Typeinfo &typeInfo = getTypeInfo(type);
    // Serialize host<->device transfers for this attribute
    std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
    if (!gpuValid)
    {
        size_t byteCount = array.size();
        allocGpuMemIfNecessary(array, byteCount, typeInfo.size, platform.gpuD3dVk, platform.gpuD3dVkCtx);
        // Array-valued elements are lazily allocated, meaning they are only
        // resized when write access is requested.
        // Write access has been requested, so resize if necessary
        if (typeInfo.isArray)
        {
            size_t elemCount = array.count;
            for (size_t elem = 0; elem != elemCount; elem++)
            {
                size_t& gpuCapacity = reinterpret_cast<size_t*>(elemToArrayGpuCapacity->cpuData())[elem];
                size_t desiredElemCount = elemToArraySize[elem];
                // Non-destructive resize: existing GPU contents are preserved
                resizeIfNecessaryGPU(*arrayGpuDataArray, elem, gpuCapacity, desiredElemCount,
                                     typeInfo.arrayElemSize, platform.gpuD3dVk, platform.gpuD3dVkCtx);
            }
            // Upload of allocated pointers to GPU happens outside this function
        }
    }
    // New state
    usdValid = false;
    cpuValid = false;
    gpuValid = true;
    gpuAllocedWithCuda = false;
    log("end enableGpuWrite\n\n");
}

// Read-only GPU (D3D/Vk) pointer to a bucket's attribute array, or nullptr if
// the bucket does not exist. Does not mark anything dirty.
inline const void* PathToAttributesMap::getArrayRdD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    BucketId bucketId = findBucketId(bucket);
    if (bucketId == kInvalidBucketId)
        return nullptr;
    // We don't set dirty indices here because this method gives read-only access
    return getArraySpanC(bucketId, attrName, D3dVkReadConfig(), suffix).array.ptr;
}

// Writable GPU (D3D/Vk) pointer to a bucket's attribute array, or nullptr if
// the bucket does not exist. Marks the whole array dirty for change tracking.
inline void* PathToAttributesMap::getArrayWrD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    BucketId bucketId = findBucketId(bucket);
    if (bucketId == kInvalidBucketId)
        return nullptr;
    ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig(), suffix);
    setArrayDirty(array);
    return array.array.ptr;
}

// Read-write GPU (D3D/Vk) pointer to a bucket's attribute array.
// NOTE(review): body is identical to getArrayWrD3d above (read-write access,
// marks dirty); kept as a separate entry point.
inline void* PathToAttributesMap::getArrayD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    BucketId bucketId = findBucketId(bucket);
    if (bucketId == kInvalidBucketId)
        return nullptr;
    ArrayAndDirtyIndices array = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig(), suffix);
    setArrayDirty(array);
    return array.array.ptr;
}

#if 0
// Legacy implementation, disabled; predates the scalar/array attribute-array split.
// If array of values, return the Buffer* that was returned by malloc
// If array of arrays, return array of Buffer* for each element array
inline std::pair<void**, size_t> PathToAttributesMap::getArrayD3d(const Bucket& bucket, const TokenC& attrName)
{
    std::pair<void**, size_t> retval = { nullptr, 0 };
    BucketImpl& bucketImpl = buckets[bucket];
    auto iter = bucketImpl.arrays.find({ pxr::TfType(), TypeC(), attrName });
    bool found = (iter != bucketImpl.arrays.end());
    if (found)
    {
        bool isTag = (typeToInfo[iter->first.type].size == 0);
        if (!isTag)
        {
            pxr::TfType type = iter->first.type;
            size_t elemSize = typeToInfo[type].size;
            size_t arrayElemSize = typeToInfo[type].arrayElemSize;
            // Read enable must come before write enable
            enableD3dGpuRead(iter->second, elemSize, arrayElemSize);
            enableD3dGpuWrite(iter->second, elemSize, arrayElemSize);
            retval.first = iter->second.d3dArrays.data();
            retval.second = iter->second.d3dArrays.size();
        }
        else if (isTag)
        {
            // If is a tag, then array.data() will be zero, so set special value
            // to distinguish from tag absent case
            retval.first = (void**)-1;
        }
    }
    return retval;
}
#endif

// Read-write GPU pointer for a single prim's attribute value.
// Returns {nullptr, 0, 0} if the path, bucket, or attribute is not present.
// For scalar attributes the returned GpuPointer addresses the bucket array at
// the element's byte offset; for array-of-arrays it is the element's own
// inner-array buffer. Marks only that element dirty.
inline omni::gpucompute::GpuPointer PathToAttributesMap::getAttributeD3d(const PathC& path, const TokenC& attrName)
{
    bool present; // Whether this path has a bucket
    BucketId bucketId; // Pointer to the bucket if it does
    size_t element; // Index corresponding to path in this bucket's arrays
    std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
    if (!present)
        return { nullptr, 0, 0 };
    // TODO: Get rid of double hash lookup below (getArraySpanC + explicit call)
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, D3dVkReadWriteConfig());
    setArrayElementDirty(arrayAndDirtyIndices, element);
    void* array = arrayAndDirtyIndices.array.ptr;
    if (array != nullptr)
    {
        // Get elemSize
        const BucketImpl* bucketImplPtr = buckets.find(bucketId);
        if (!bucketImplPtr)
            return { nullptr, 0, 0 };
        const AttrName name{ attrName, NameSuffix::none };
        const MirroredArray *valuesArray;
        // Look up the values array: scalar attributes first, then array attributes
        if (!bucketImplPtr->scalarAttributeArrays.find(name, &valuesArray))
        {
            const ArrayAttributeArray *arrayAttributeArray;
            if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
            {
                valuesArray = &arrayAttributeArray->values;
            }
            else
            {
                return { nullptr, 0, 0 };
            }
        }
        assert(valuesArray);
        const Typeinfo &typeinfo = getTypeInfo(valuesArray->type);
        const bool isArrayOfArray = typeinfo.isArray;
        const size_t elemSize = typeinfo.size;
        if (!isArrayOfArray)
        {
            return { array, element * elemSize, elemSize };
        }
        else if (isArrayOfArray)
        {
            // For arrays of arrays we return the Buffer* of the inner array
            uint8_t* const* elemToArrayData = (uint8_t* const*)array;
            return { elemToArrayData[element], 0, 0 };
        }
    }
    return { nullptr, 0, 0 };
}

}
}
omniverse-code/kit/fabric/include/carb/flatcache/PathToAttributesMap.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/Defines.h> #include <carb/flatcache/AttrNameAndType.h> #include <carb/flatcache/IdTypes.h> #include <carb/flatcache/ApiLogger.h> #include <carb/flatcache/HashMap.h> #include <carb/flatcache/IFlatcache.h> #include <carb/flatcache/IPath.h> #include <carb/flatcache/IToken.h> #include <carb/flatcache/Ordered_Set.h> #include <carb/flatcache/Platform.h> #include <carb/flatcache/PrimChanges.h> #include <carb/flatcache/Type.h> #include <carb/logging/Log.h> #include <carb/profiler/Profile.h> #include <carb/thread/Mutex.h> #include <omni/gpucompute/GpuCompute.h> #include <fstream> // The following is needed to include USD headers #if defined(__GNUC__) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wdeprecated-declarations" # pragma GCC diagnostic ignored "-Wunused-local-typedefs" # pragma GCC diagnostic ignored "-Wunused-function" // This suppresses deprecated header warnings, which is impossible with pragmas. // Alternative is to specify -Wno-deprecated build option, but that disables other useful warnings too. # ifdef __DEPRECATED # define OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS # undef __DEPRECATED # endif #endif #include <pxr/base/tf/type.h> #include <pxr/usd/usd/timeCode.h> // PathToAttributesMap doesn't depend on USD for tokens or paths // However, it's useful to be able to see the USD text representation of tokens and // paths when debugging. Set ENABLE_USD_DEBUGGING to 1 to enable that. 
#if defined(_DEBUG) # define ENABLE_USD_DEBUGGING 1 #else # define ENABLE_USD_DEBUGGING 0 #endif // TODO: Move this to some shared macro header if needed elsewhere #if defined(_DEBUG) #define VALIDATE_TRUE(X) CARB_ASSERT(X) #else #define VALIDATE_TRUE(X) X #endif #define PTAM_SIZE_TYPE size_t #define PTAM_SIZE_TYPEC (flatcache::TypeC(carb::flatcache::Type(carb::flatcache::BaseDataType::eUInt64))) static_assert(sizeof(PTAM_SIZE_TYPE) == sizeof(uint64_t), "Unexpected sizeof size_t"); #define PTAM_POINTER_TYPE void* #define PTAM_POINTER_TYPEC (flatcache::TypeC(carb::flatcache::Type(carb::flatcache::BaseDataType::eUInt64))) static_assert(sizeof(PTAM_POINTER_TYPE) == sizeof(uint64_t), "Unexpected sizeof void*"); // When we switch to CUDA async CPU<->GPU copies, we'll need to use pinned CPU // memory for performance. However, the allocations themselves will be much // slower. If you want to see how much slower, set USE_PINNED_MEMORY to 1. // When we do switch, we should probably do a single allocation and sub // allocate it ourselves. That way we'd only call cudaHostAlloc once. #define USE_PINNED_MEMORY 0 // Set this to one to enable CARB profile zones for large bucket copies #define PROFILE_LARGE_BUCKET_COPIES 0 // We plan to move TfToken and AssetPath construction to IToken. 
// Until we do we have to depend on token.h, a USD header #include "FlatCacheUSD.h" #include <pxr/usd/sdf/pathTable.h> // 104 only - do not port this to 105+ // Enums are in their own file since they have no external dependencies #include "Enums.h" #include <pxr/base/tf/token.h> #include <pxr/usd/sdf/path.h> #include <algorithm> #include <iostream> #include <iterator> #include <map> #include <queue> #include <set> #include <unordered_map> #include <utility> using pxr::UsdTimeCode; using Hierarchy = pxr::SdfPathTable<int>; // 104 only - do not port this to 105+ namespace carb { namespace flatcache { struct AttrName { TokenC name; NameSuffix suffix; bool operator<(const AttrName& other) const = delete; bool operator==(const AttrName& other) const = delete; }; // Use the same hash_combine as boost template <class T> static inline size_t hash_combine(std::size_t seed, const T& v) { std::hash<T> hasher; seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); return seed; } } } namespace carb { namespace flatcache { inline std::string toString(NameSuffix suffix) { if (suffix == NameSuffix::connection) return "_connection"; else if (suffix == NameSuffix::none) return ""; return ""; } inline std::ostream& operator<<(std::ostream& s, const NameSuffix& nameSuffix) { s << toString(nameSuffix); return s; } // FlatCache buckets UsdPrims according to their type and the UsdAttributes // they have. For example, all the UsdGeomMeshes can go in one bucket, all the // UsdSkelAnimations in another. The user can then quickly get a contiguous // array of all the meshes, without having to traverse the whole stage. // // The name of a bucket is its set of attribute names (Tokens). // As in the USD API, these tokens are the names used in USDA files, not C++ // type names like UsdGeomMesh. 
//
// Example (using C++11 initializer lists to create the sets):
// Bucket meshes = { Token("Mesh") };
// Bucket skelAnimations = { Token("SkelAnimation") };
//
// For efficiency, set is an ordered c++ array, not std::set
using Bucket = set<AttrNameAndType>;

// A bucket's name (attribute set) paired with its stable id.
struct BucketAndId
{
    const Bucket bucket;
    const BucketId bucketId;
};

// Buckets store attribute values in contiguous arrays, and in C++ array
// indices are size_t
using ArrayIndex = size_t;
const ArrayIndex kInvalidArrayIndex = 0xffff'ffff'ffff'ffff;

// Set of changed element indices for one attribute, with an "everything
// changed" fast path.
//
// Invariants:
// I0: setOfChangedIndices = [0..N), if allIndicesChanged
//                         = changedIndices, otherwise
// I1: If setOfChangedIndices==[0..N) then allIndicesChanged=true and changedIndices = {}
//     where N is defined by the caller
//
// In particular this means that changedIndices can't have size N
// because if all indices were changed, then changedIndices = {}, by I1
struct ChangedIndicesImpl
{
    bool allIndicesChanged = true;
    flatcache::set<ArrayIndex> changedIndices;

    // Empty set for N==0 is represented as "all changed" (of zero indices);
    // otherwise start with nothing changed.
    ChangedIndicesImpl(size_t N)
    {
        if (N == 0)
            allIndicesChanged = true;
        else
            allIndicesChanged = false;
    }

    // Create the singleton set {index}
    ChangedIndicesImpl(ArrayIndex index, size_t N)
    {
        if (index == 0 && N == 1)
        {
            // Maintain invariant I0
            allIndicesChanged = true;
        }
        else
        {
            // Maintain invariant I0
            changedIndices = { index };
            allIndicesChanged = false;
        }
    }

    // Mark every index changed.
    void dirtyAll()
    {
        // Maintain invariant I1
        allIndicesChanged = true;
        changedIndices.clear();
    }

    // Add `index` to the set; collapses to the all-changed representation when
    // the set would cover the whole range [0..N).
    void insert(size_t index, size_t N)
    {
        CARB_ASSERT(index < N);
        // If all indices already changed, then inserting an index has no
        // effect
        if (allIndicesChanged)
            return;
        changedIndices.insert(index);
        // Maintain invariant I1
        if (changedIndices.size() == N)
        {
            dirtyAll();
        }
    }

    // The caller has shrunk the valid range from newN+1 to newN: drop the
    // now-out-of-range index, and restore invariant I1 if the remaining set
    // covers all of [0..newN).
    void decrementN(size_t newN)
    {
        if (allIndicesChanged)
            return;
        changedIndices.erase(newN);
        if (changedIndices.size() == newN)
            allIndicesChanged = true;
    }

    // Remove `index` from the set, expanding the all-changed representation
    // into an explicit sorted list first when necessary.
    void erase(size_t index, size_t N)
    {
        CARB_ASSERT(index < N);
        if (allIndicesChanged)
        {
            allIndicesChanged = false;
            // Make a sorted list of integers [0..N) \ index
            changedIndices.v.resize(N - 1);
            size_t dest = 0;
            for (size_t i = 0; i != index; i++)
            {
                changedIndices.v[dest++] = i;
            }
            for (size_t i = index + 1; i != N; i++)
            {
                changedIndices.v[dest++] = i;
            }
            return;
        }
        changedIndices.erase(index);
    }

    // Membership test; trivially true in the all-changed representation.
    bool contains(size_t index)
    {
        if (allIndicesChanged)
            return true;
        return changedIndices.contains(index);
    }
};

// An attribute array span together with per-listener change-tracking sets.
struct ArrayAndDirtyIndices
{
    SpanC array; // We use SpanC instead of gsl::span<const uint8_t> to allow casting to array of correct type
    std::vector<ChangedIndicesImpl*> changedIndicesForEachListener; // This is empty if change tracking is not enabled for this attribute
};

// Bucket vectors and their attribute arrays are public, so users can iterate
// over them directly using for loops.
// For users that prefer opaque iterators, we provide View.
struct View;

// FlatCache doesn't need all metadata in UsdAttribute, just the attribute's
// type, size in bytes, whether it is an array, and if it is an array, the
// size of each elements in bytes
struct Typeinfo
{
    size_t size;
    bool isArray;
    size_t arrayElemSize;
};

// FlatCache stores a map from attribute names (Tokens) to their type and
// size.
using TypeToInfo = HashMap<TypeC, Typeinfo, std::hash<TypeC>, std::equal_to<TypeC>, AllocFunctor, FreeFunctor>;

// By default, an attribute's value is not in the cache, and flags == eNone
// Once the user reads a value, ePresent is true
// Once the user writes a value, eDirty is true
enum class Flags
{
    eNone = 0,
    ePresent = 1,
    eDirty = 2
};

// Operators for combining Flags
constexpr enum Flags operator|(const enum Flags a, const enum Flags b)
{
    return (enum Flags)(uint32_t(a) | uint32_t(b));
}
constexpr enum Flags operator&(const enum Flags a, const enum Flags b)
{
    return (enum Flags)(uint32_t(a) & uint32_t(b));
}

// Per-bucket change summary handed to change-tracking listeners.
struct BucketChangesImpl
{
    // Which attributes changed
    gsl::span<const AttrNameAndType> changedAttributes;
    // For each attribute, which prims changed?
    std::vector<ConstChangedIndicesC> changedIndices;
    gsl::span<const Path> pathArray;
    // Which indices contain newly added prims?
    gsl::span<const size_t> addedIndices;
};

// Aggregated change report: the set of buckets that changed plus the
// per-bucket details.
struct PrimBucketListImpl
{
    flatcache::set<BucketId> buckets;
    std::vector<BucketChangesImpl> changes;

    void clear()
    {
        buckets.clear();
        changes.clear();
    }
};

using SerializationCache = HashMap<uint64_t, std::string>;
using DeserializationCache = HashMap<std::string, pxr::SdfPath>;

// Now we've defined the basic types, we can define the type of FlatCache.
//
// Abstractly, FlatCache maps each Path to the UsdAttributes of the UsdPrim
// at that path. So the type of FlatCache is "PathToAttributesMap".
struct PathToAttributesMap
{
    // One attribute array mirrored across up to three representations
    // (USD, CPU, GPU), with validity flags tracking which mirrors are
    // current. Non-copyable; moves and swap are declared out-of-line.
    struct MirroredArray
    {
    private:
        std::vector<uint8_t> cpuArray; // CPU mirror (raw bytes)
    public:
        Platform& platform;
        TypeC type;
        Typeinfo typeinfo;
        uint8_t* gpuArray;   // GPU mirror; lazily (re)allocated
        size_t gpuCapacity; // Amount of memory allocated at gpuArray in bytes
        std::vector<void*> d3dArrays; // Actually vector of Buffer*
        size_t count;        // Number of elements (prims) in this array
        // Which mirrors currently hold valid data
        bool usdValid;
        bool cpuValid;
        bool gpuValid;
        bool gpuAllocedWithCuda; // Distinguishes CUDA vs D3D/Vk GPU allocations
        using AttributeMutex = carb::thread::mutex;
        AttributeMutex attributeMutex; // Guards host<->device transfers

        MirroredArray(Platform& platform_, const TypeC &type, const Typeinfo& typeinfo) noexcept;
        ~MirroredArray();
        MirroredArray(const MirroredArray& other) = delete;
        MirroredArray& operator=(const MirroredArray& other) noexcept;
        MirroredArray(MirroredArray&& other) noexcept;
        MirroredArray& operator=(MirroredArray&& other) noexcept;
        friend void swap(MirroredArray& a, MirroredArray& b) noexcept;

        // True when each element is itself an array (array-of-arrays layout).
        inline bool isArrayOfArray() const
        {
            CARB_ASSERT((typeinfo.arrayElemSize != 0) == typeinfo.isArray);
            return typeinfo.isArray;
        }

        // Scalar arrays are their own values array (see ArrayAttributeArray
        // for the array-of-arrays counterpart).
        inline MirroredArray* getValuesArray()
        {
            return this;
        }

        void resize(size_t byteCount)
        {
            // CPU
            // At the moment, CPU always resizes, but eventually it will only
            // resize if it is allocated and valid
            // This will ensure that GPU temp data is never allocated on CPU
            cpuArray.resize(byteCount);
            // Don't need to resize GPU here, because it is deferred until next
            // copy to/from GPU mem
        }

        // GPU resize that preserves existing contents
        // This is used by addPath and addAttributes
        void resizeGpu(omni::gpucompute::GpuCompute* computeAPI,
                       omni::gpucompute::Context* computeCtx,
                       size_t byteCount,
                       size_t elemSize)
        {
            if (!computeAPI || !computeCtx)
                return;
            bool capacitySufficient = (byteCount <= gpuCapacity);
            if (!capacitySufficient)
            {
                // Grow: allocate new buffer, copy old contents, free old buffer
                void* oldGpuArray = gpuArray;
                size_t oldByteCount = gpuCapacity;
                gpuArray = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemSize));
                gpuCapacity = byteCount;
                if (oldGpuArray)
                {
                    using omni::gpucompute::MemcpyKind;
                    computeAPI->memcpyAsync(*computeCtx, gpuArray, oldGpuArray, oldByteCount, MemcpyKind::deviceToDevice);
                    computeAPI->freeAsync(*computeCtx, oldGpuArray);
                }
            }
        }

        // Size of the CPU mirror in bytes.
        size_t size() const
        {
            return cpuArray.size();
        }
        uint8_t* cpuData()
        {
            return cpuArray.data();
        }
        const uint8_t* cpuData() const
        {
            return cpuArray.data();
        }
        void clear()
        {
            cpuArray.clear();
        }
    };

    using ScalarAttributeArray = MirroredArray;

    // Storage for an array-valued attribute: the values plus four bookkeeping
    // arrays (logical element counts, CPU/GPU capacities, and GPU pointers).
    struct ArrayAttributeArray
    {
        // Indices naming the five member MirroredArrays, in declaration order.
        enum class MirroredArrays : uint8_t
        {
            Values,
            ElemCounts,
            CpuElemCounts,
            GpuElemCounts,
            GpuPtrs,
            Count
        };

        ArrayAttributeArray(Platform& platform_, const TypeC& type, const Typeinfo &typeinfo) noexcept;
        ~ArrayAttributeArray();
        ArrayAttributeArray(const ArrayAttributeArray& other) = delete;
        ArrayAttributeArray& operator=(const ArrayAttributeArray& other) noexcept;
        ArrayAttributeArray(ArrayAttributeArray&& other) noexcept;
        ArrayAttributeArray& operator=(ArrayAttributeArray&& other) noexcept;
        friend void swap(ArrayAttributeArray& a, ArrayAttributeArray& b) noexcept;

        inline MirroredArray* getValuesArray()
        {
            return &values;
        }

        MirroredArray values;
        MirroredArray elemCounts;
        MirroredArray cpuElemCounts;
        MirroredArray gpuElemCounts;
        MirroredArray gpuPtrs;
    };
    // DO NOT generalize this static_assert using globally named defines for magic numbers.
// We intentionally sprinkle static_assert on hardcoded sizes around this file to increase friction when changing // the struct definition. Any change to ArrayAttributeArray requires evaluating multiple locations that rely on // keeping in sync with the struct. Having each of these be hardcoded comparions forces future authors to // individually evaluate each dependent site for correctness. If the comparison is generalized, future authors could // simply adjust the global definition without examining every dependent routine, which might lead to errors. static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size"); struct Changes { // changedAttributes and changesIndices together implement an ordered // map, from attribute to changed indices. // changedAttributes is a flatcache::set, which is a sorted std::vector. // // To lookup an element of the map, find the index, i, of the key in // changedAttributes, then read the value from changedIndices[i] // // changedAttributes and changedIndices must have the same size. 
// // TODO: make a general ordered_map class based on flatcache::set flatcache::set<AttrNameAndType> changedAttributes; std::vector<ChangedIndicesImpl> changedIndices; // // New elements are stored in a set // flatcache::set<ArrayIndex> addedIndices; void setDirty(const AttrNameAndType& nameAndType, size_t index, size_t maxIndex) { auto& keys = changedAttributes.v; auto& values = changedIndices; auto insertIter = lower_bound(keys.begin(), keys.end(), nameAndType); ptrdiff_t insertIndex = insertIter - keys.begin(); bool found = (insertIter != keys.end() && !(nameAndType < *insertIter)); if (found) { values[insertIndex].insert(index, maxIndex); } else { keys.insert(insertIter, nameAndType); values.insert(values.begin() + insertIndex, ChangedIndicesImpl(index, maxIndex)); } } void dirtyAll(const AttrNameAndType& nameAndType, size_t maxIndex) { auto& keys = changedAttributes.v; auto& values = changedIndices; auto insertIter = lower_bound(keys.begin(), keys.end(), nameAndType); ptrdiff_t insertIndex = insertIter - keys.begin(); bool found = (insertIter != keys.end() && !(nameAndType < *insertIter)); if (found) { values[insertIndex].dirtyAll(); } else { keys.insert(insertIter, nameAndType); ChangedIndicesImpl changedIndices(maxIndex); changedIndices.dirtyAll(); values.insert(values.begin() + insertIndex, changedIndices); } } void addNewPrim(size_t index) { addedIndices.insert(index); } void removePrim(size_t index) { // // we just clean the index from the set // addedIndices.erase(index); } size_t getNewPrimCount() { return addedIndices.size(); } }; // FlatCache buckets UsdPrims according to type and attributes, and // BucketImpl stores the attribute values of a bucket's prims in // structure-of-arrays (SOA) format. // BucketImpl maps each attribute name (TokenC) to a MirroredArray, a // contiguous byte array (vector<uint8_t>) and a bitfield encoding the // validate/dirtiness of each mirror. 
    // Abstractly, flatcache data is addressed like a multidimensional array
    // buckets[bucket][attributeName][path].
    // FlatCache uses byte arrays instead of typed arrays, because USD files,
    // scripts, and plugins can define custom types, so no dll or exe knows the
    // complete set of types at the time of its compilation.
    // BucketImpl also contains elemToPath to map each SOA element to the
    // Path it came from.
    struct BucketImpl
    {
        // Hash an AttrName by combining its token hash with its suffix.
        struct Hasher
        {
            size_t operator()(const AttrName& key) const
            {
                return hash_combine(hash(key.name), uint32_t(key.suffix));
            }
        };
        struct KeyEqual
        {
            bool operator()(const AttrName& a, const AttrName& b) const
            {
                return (a.name == b.name) && (a.suffix == b.suffix);
            }
        };
        using ScalarAttributeArrays = HashMap<AttrName, ScalarAttributeArray, Hasher, KeyEqual, AllocFunctor, FreeFunctor>;
        using ArrayAttributeArrays = HashMap<AttrName, ArrayAttributeArray, Hasher, KeyEqual, AllocFunctor, FreeFunctor>;

        Platform& platform;
        ScalarAttributeArrays scalarAttributeArrays; // scalar-valued attributes
        ArrayAttributeArrays arrayAttributeArrays;   // array-valued attributes
        std::vector<pxr::SdfPath> elemToPath;        // SOA element index -> source prim path
        // listenerIdToChanges entries are lazily created when the user changes
        // an attribute, or when an attribute moves between buckets
        HashMap<ListenerId, Changes, ListenerIdHasher, std::equal_to<ListenerId>, AllocFunctor, FreeFunctor> listenerIdToChanges;

        template<typename CallbackT>
        void forEachValueArray(CallbackT callback);

        // All hash maps share the platform's allocator.
        BucketImpl(Platform &platform_)
            : platform(platform_)
            , scalarAttributeArrays(0, Hasher(), KeyEqual(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
            , arrayAttributeArrays(0, Hasher(), KeyEqual(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
            , elemToPath()
            , listenerIdToChanges(0, ListenerIdHasher(), std::equal_to<ListenerId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator })
        {
        }

        ~BucketImpl()
        {
#if PROFILE_LARGE_BUCKET_COPIES
            // NOTE(review): `arrays` below does not name a current member
            // (arrays were split into scalarAttributeArrays /
            // arrayAttributeArrays); this block only compiles when
            // PROFILE_LARGE_BUCKET_COPIES is enabled — confirm before enabling.
            size_t count = elemToPath.size();
            carb::profiler::ZoneId zoneId = carb::profiler::kUnknownZoneId;
            if (1000 <= count)
                zoneId = CARB_PROFILE_BEGIN(1, "Destroy Bucket %zu", count);
            arrays.clear();
            elemToPath.clear();
            if (1000 <= count)
                CARB_PROFILE_END(1, zoneId);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
        }

        BucketImpl(const BucketImpl&) = delete;

        // Deep copy: each attribute array is placement-new constructed against
        // *this* bucket's platform, then copy-assigned from the source.
        inline BucketImpl& operator=(const BucketImpl& other) noexcept
        {
#if PROFILE_LARGE_BUCKET_COPIES
            size_t count = other.elemToPath.size();
            carb::profiler::ZoneId zoneId = carb::profiler::kUnknownZoneId;
            if (1000 <= count)
                zoneId = CARB_PROFILE_BEGIN(1, "Copy Bucket %zu", count);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
            scalarAttributeArrays.clear();
            scalarAttributeArrays.reserve(other.scalarAttributeArrays.size());
            other.scalarAttributeArrays.forEach([this](const AttrName& name, const ScalarAttributeArray &otherArray) {
                // construct new array with the current BucketImpl platform, but mimicing type of otherArray
                ScalarAttributeArray *array;
                scalarAttributeArrays.allocateEntry(name, &array);
                new (array) ScalarAttributeArray(platform, otherArray.type, otherArray.typeinfo);
                *array = otherArray;
            });
            arrayAttributeArrays.clear();
            arrayAttributeArrays.reserve(other.arrayAttributeArrays.size());
            other.arrayAttributeArrays.forEach([this](const AttrName& name, const ArrayAttributeArray &otherArray) {
                // construct new array with the current BucketImpl platform, but mimicing type of otherArray
                ArrayAttributeArray *array;
                arrayAttributeArrays.allocateEntry(name, &array);
                new (array) ArrayAttributeArray(platform, otherArray.values.type, otherArray.values.typeinfo);
                *array = otherArray;
            });
            elemToPath = other.elemToPath;
            listenerIdToChanges.clear();
            listenerIdToChanges.reserve(other.listenerIdToChanges.size());
            other.listenerIdToChanges.forEach([this](const ListenerId& listener, const Changes &otherChanges) {
                Changes* changes;
                VALIDATE_TRUE(listenerIdToChanges.allocateEntry(listener, &changes));
                static_assert(std::is_copy_constructible<Changes>::value, "Expected listenerIdToChanges values to be copy-constructible");
                new (changes) Changes(otherChanges);
            });
            bucket = other.bucket;
#if PROFILE_LARGE_BUCKET_COPIES
            if (1000 <= count)
                CARB_PROFILE_END(1, zoneId);
#endif // #if PROFILE_LARGE_BUCKET_COPIES
            return *this;
        }

        BucketImpl(BucketImpl&& other) noexcept = delete;

        inline BucketImpl& operator=(BucketImpl&& other) noexcept
        {
            // this->platform = std::move(b.platform); // intentionally not move-assigning platform (we can't anyways, it's a ref)
            this->scalarAttributeArrays = std::move(other.scalarAttributeArrays);
            this->arrayAttributeArrays = std::move(other.arrayAttributeArrays);
            this->elemToPath = std::move(other.elemToPath);
            this->listenerIdToChanges = std::move(other.listenerIdToChanges);
            this->bucket = std::move(other.bucket);
            return *this;
        }

        const Bucket& GetBucket() const
        {
            return bucket;
        }

        //
        // TODO: In the future we should support universal ref +
        // move assignments, unforuntately Bucket doesn't follow
        // the rule of 5 so that is unavailable to us currently.
        //
        void SetBucket(const Bucket& _bucket)
        {
            bucket = _bucket;
        }

    private:
        //
        // bucketImpl knows the Bucket it represents
        //
        Bucket bucket;
    };

    /**
     * @struct BucketIdToImpl
     *
     * @brief Convenience data structure for quick bucket lookups
     *
     * @details We want to avoid the cost of hashmap lookups when possible
     *          due to the large number of times that elements are looked
     *          up via a single element lookup. This class creates a static vector
     *          to track buckets. It also will keep the array densely packed
     *          as possible, while not incurring the cost of moves
     *
     * @notes 1) NOT threadsafe
     *
     * @todo possibly store the last and first valid so one can avoid un-needed
     *       iteration
     * @todo provide an iterator
     * @todo make deleting buckets move lastFreeSlot back where approriate so that
It also will keep the array densely packed * as possible, while not incurring the cost of moves * * @notes 1) NOT threadsafe * * @todo possibly store the last and first valid so one can avoid un-needed * iteration * @todo provide an iterator * @todo make deleting buckets move lastFreeSlot back where approriate so that * it is always meaningful as "end()" */ struct BucketIdToImpl { // A reasonable first size for the number of buckets // If one changed this number, they would have to update // the constants in the unit tests "Check Bucket Growth" static const int max_buckets_init = 1024; /** * @brief Initialize storage to the minimum size * We rely on C++ behavior that default initalizes the * std::vector bool to false for valid tracking * */ BucketIdToImpl(Platform& platform) : platform(platform) , buckets(max_buckets_init) , lastEmptySlot{ 0 } { } ~BucketIdToImpl() { clear(); } /** * @brief mimic emplace function for stl objects * * @details This allows us to do move of buckets after they have been * created into the storage. We choose for backwards compatibility * with other APS to return a pair. It takes care of the correct place * to store the bucket for you to keep the storage as dense as possible. * This claims the next bucket, and will move out of * @param bucketImpl a new bucket to be added to the Storage, empty after function call * due to move. 
* @return <BucketId, BucketImpl&> the pair representing the Id for lookup of the bucket
*         and a reference to the data since it was moved from param
*/
std::pair<BucketId, BucketImpl&> emplace(BucketImpl* bucketImpl)
{
    CARB_ASSERT(bucketImpl);
    // NOTE(review): ClaimNextOpenBucket() installs a freshly allocated
    // BucketImpl in the claimed slot, which the branch below immediately
    // deletes before installing the caller-supplied pointer. The interim
    // allocation looks redundant -- confirm against other callers of
    // ClaimNextOpenBucket() before changing it.
    BucketId bucketId = ClaimNextOpenBucket();
    if (buckets[size_t(bucketId)])
    {
        platform.allocator.delete_(buckets[size_t(bucketId)]);
    }
    // Ownership of bucketImpl transfers to this container
    // (std::move on a raw pointer is equivalent to a copy).
    buckets[size_t(bucketId)] = std::move(bucketImpl);
    return std::pair<BucketId, BucketImpl&>(bucketId, *buckets[(size_t)bucketId]);
}

/**
 * @brief Erase the specified bucket
 *
 * @details This actually forces deletion of the object that is to be deleted;
 *          it adds the id to the list of free buckets so that it will be
 *          recycled before more are added to the end.
 *
 * @param id : The id of the bucket to be deleted
 */
void erase(BucketId id)
{
    if (size_t(id) < buckets.size() && buckets[size_t(id)])
    {
        // Ignoring optimization of if last slot is empty
        freeSlots.push(id);
        platform.allocator.delete_(buckets[size_t(id)]);
        buckets[size_t(id)] = nullptr;
    }
}

// Find the bucket at the requested slot; if no bucket exists
// then we return nullptr.
/**
 * @brief Find the bucket if it exists
 *
 * @param id : The id of the bucket to be found
 *
 * @return If the bucket exists a pointer is returned, otherwise a null pointer is returned
 */
BucketImpl* find(BucketId id)
{
    if (size_t(id) < buckets.size())
        return buckets[size_t(id)];
    return nullptr;
}

/**
 * @brief Find the bucket if it exists (const)
 *
 * @param slot : The id of the bucket to be found
 *
 * @return If the bucket exists a const pointer is returned, otherwise a null pointer is returned
 */
const BucketImpl* find(BucketId slot) const
{
    if (size_t(slot) < buckets.size())
        return buckets[size_t(slot)];
    return nullptr;
}

/**
 * @brief Clear all the buckets
 *
 * @details This will force deletion of all the nodes and make the storage
 *          appear empty.
 */
void clear()
{
    // Only slots below lastEmptySlot can hold live buckets
    for (uint64_t i = 0; i < size_t(lastEmptySlot); ++i)
    {
        if (buckets[i])
        {
            platform.allocator.delete_(buckets[i]);
            buckets[i] = nullptr;
        }
    }
    // no clear in std::queue so we swap with a new one
    std::queue<BucketId>().swap(freeSlots);
    lastEmptySlot.id = 0;
}

/**
 * @brief Get the possible end of the storage
 *
 * @details This will return the last possible id for a bucket; however it
 *          should be combined with a validity check (e.g. find()) to be careful.
 *
 * @return The last "allocated bucket", but it could not be valid
 */
size_t end() const
{
    return size_t(lastEmptySlot);
}

/**
 * @brief Support copy assignment.
 *
 * @note In order for this to be a valid copy it must be followed up
 *       by a call to PathToAttributesMap::BucketImplCopyArrays to
 *       correctly copy array-of-array data.
 *
 * @return Copy constructed buckets, without array-of-arrays set up
 */
BucketIdToImpl& operator=(const BucketIdToImpl& other)
{
    // Array of free slots
    this->freeSlots = other.freeSlots;
    // Track the last empty slot
    this->lastEmptySlot = other.lastEmptySlot;
    //
    // A bucketImpl is a struct that mainly contains a map
    // to arrays which are of the data type MirroredArray.
    // A MirroredArray has two states
    //   (1) It contains an array of data
    //   (2) It contains an array of arrays.
    // In the case of (2) the array itself doesn't have enough
    // information to make a copy of the array of arrays, so the
    // copy constructor is overloaded and the structure around the
    // array of arrays is loaded, but the actual copying of that data is
    // pushed off to be done by the function BucketImplCopyArrays
    // which is a member of PathToAttributesMap which is the only place
    // that currently has enough information to make the copy
    //
    this->buckets.resize(other.buckets.size());
    for (size_t i = 0; i < this->buckets.size(); ++i)
    {
        if (other.buckets[i])
        {
            // Reuse the destination bucket if present, else allocate one,
            // then copy the (non array-of-array) contents
            if (!this->buckets[i])
            {
                this->buckets[i] = platform.allocator.new_<BucketImpl>(platform);
            }
            *this->buckets[i] = *other.buckets[i];
        }
        else if (this->buckets[i])
        {
            // Source slot is empty: drop any existing destination bucket
            platform.allocator.delete_(this->buckets[i]);
            this->buckets[i] = nullptr;
        }
    }
    return *this;
}

/**
 * @brief Resize the internals
 *
 * @details Note that resize will only grow; calling with a smaller size is a no-op.
 */
void resize(size_t newSize)
{
    if (newSize > buckets.size())
    {
        buckets.resize(newSize);
    }
}

/**
 * @brief Claim a bucket by index. This means that the pointer will be
 *        returned regardless of valid, and that it will mark as valid
 *        and update internals where needed. In the case where an index is
 *        requested that is past the allocated then more memory is allocated.
 *
 * @note This should be used sparingly; generally it is intended to be used
 *       in the case where we are re-constructing one flat cache from another.
 *       If things are done out of order it could be expensive; also it is assumed
 *       all external mappings are maintained by the claimer.
 *
 * @param id : The id of the bucket to be claimed
 *
 * @return A reference to the bucketImpl that was claimed
 */
BucketImpl& claim(BucketId id)
{
    // grow if needed
    if (size_t(id) >= buckets.size())
    {
        resize(size_t(id) + 1);
    }
    // if the bucket is already valid we can just return access to it
    if (!buckets[size_t(id)])
    {
        // otherwise we need to update accordingly
        // NOTE(review): when lastEmptySlot <= id this loop also pushes `id`
        // itself onto freeSlots even though the slot is filled immediately
        // below, so freeSlots can end up containing an occupied slot and
        // ClaimNextOpenBucket()'s CARB_ASSERT(!buckets[slot]) can later
        // fire. Confirm whether callers only use claim() while rebuilding
        // a cache that never calls emplace() afterwards.
        while (lastEmptySlot <= id)
        {
            if (!buckets[size_t(lastEmptySlot)])
            {
                freeSlots.push(lastEmptySlot);
            }
            ++lastEmptySlot;
        }
        buckets[size_t(id)] = platform.allocator.new_<BucketImpl>(platform);
    }
    CARB_ASSERT(buckets[size_t(id)]);
    return *buckets[size_t(id)];
}

// Invoke callback for each non-null bucket (definition elsewhere)
template<typename CallbackT>
void forEachValidBucket(CallbackT callback) const;

private:
/**
 * @brief Get the next open bucket
 *
 * @note May invalidate references to existing buckets
 */
BucketId ClaimNextOpenBucket()
{
    BucketId slot = lastEmptySlot;
    if (freeSlots.size() != 0)
    {
        // Recycle a previously erased slot before growing the storage
        slot = freeSlots.front();
        freeSlots.pop();
    }
    else
    {
        ++lastEmptySlot;
        if (size_t(lastEmptySlot) == buckets.size())
        {
            // Storage is full: double it, moving the pointers across
            std::vector<BucketImpl*> newVector;
            newVector.resize(buckets.size() * 2);
            //
            // A bucketImpl is a struct that mainly contains a map
            // to arrays which are of the data type MirroredArray.
            // A MirroredArray has two states
            //   (1) It contains an array of data
            //   (2) It contains an array of arrays.
            // In the case of (2) the array itself doesn't have enough
            // information to make a copy of the array of arrays, so the
            // copy constructor is overloaded and the structure around the
            // array of arrays is loaded, but the actual copying of that data is
            // pushed off to be done by the function BucketImplCopyArrays
            // which is a member of PathToAttributesMap which is the only place
            // that currently has enough information to make the copy
            //
            // Since we cannot guarantee that someone will know to call
            // the copyArrays function we enforce that data must be moved here.
            //
            for (size_t i = 0; i < buckets.size(); ++i)
            {
                newVector[i] = std::move(buckets[i]);
            }
            std::swap(newVector, buckets);
        }
    }
    CARB_ASSERT(!buckets[size_t(slot)]);
    buckets[size_t(slot)] = platform.allocator.new_<BucketImpl>(platform);
    return slot;
}

Platform& platform;
// array of bucket impls
std::vector<BucketImpl*> buckets;
// Array of free slots
std::queue<BucketId> freeSlots;
// Track the last empty slot
BucketId lastEmptySlot;
};

// Internally we convert Paths to uint64_t path ids using asInt().
// PathId is the domain of pathToBucketElem, defined below.
using PathId = PathC;

Platform& platform;

// Concretely, FlatCache is the following three maps:
// 1) Each path maps to a bucket, and an SOA index within that bucket.
//    This level of indirection allows the user to delete prims and/or
//    attributes without creating holes in the SOAs. Whenever the user
//    deletes a prim, the cache moves the last SOA element to the deleted
//    element, and updates the path to element map of the moved SOA element.
// 2) Buckets (sets of attribute names) map to BucketImpls, defined above.
//    This allows the user to quickly get e.g. arrays of all the meshes,
//    or all the meshes that have rigid body attributes.
// 3) pxr::TfType names map to TypeInfos, containing attribute type and size in bytes.
// Map (1): path -> (bucket, SOA index within that bucket)
HashMap<PathId, std::pair<BucketId, ArrayIndex>, std::hash<PathId>, std::equal_to<PathId>, AllocFunctor, FreeFunctor>
    pathToBucketElem;
// Map (2): bucket id -> bucket data, plus the reverse lookup by attribute-name set
BucketIdToImpl buckets;
std::map<Bucket, BucketId> attrNameSetToBucketId;

// Each listener has its own attrNamesToLog and enableChangeTracking
struct ChangeTrackerConfig
{
    set<TokenC> attrNamesToLog;
    bool changeTrackingEnabled = true;
};
HashMap<ListenerId, ChangeTrackerConfig, ListenerIdHasher, std::equal_to<ListenerId>, AllocFunctor, FreeFunctor>
    listenerIdToChangeTrackerConfig;

// Map (3): type -> type info (size, etc.)
TypeToInfo typeToInfo;
UsdStageId usdStageId;
bool minimalPopulationDone = false;
// 104 only - do not port this forward to 105+
Hierarchy stageHierarchy;
// 104 only - do not port this forward to 105+
mutable bool apiLogEnabled = false;

// The rest of PathToAttributesMap is methods
size_t size();
void clear();
void printMirroredArray(const char* const label, const ScalarAttributeArray &array, const size_t* const arrayElemCount) const;
void print() const;

// Void* multiple attribute interface
void getArraysRdC(const void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount);
void getAttributesRdC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount);
void getAttributesRdGpuC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind);
void getArraysWrC(void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount);
void getAttributesWrC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount);
void getAttributesWrGpuC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind);

// Span interface
SpanC getArraySpanC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArraySpanRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArraySpanWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);

// Void* interface
void addAttributeC(const PathC& path, const TokenC& attrName, TypeC type, const void* value = nullptr);
void addArrayAttributeC(const PathC& path, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount);
void addAttributesToPrim(const PathC& path, const std::vector<TokenC>& attrNames, const std::vector<TypeC>& typeCs);
void addAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const void* value = nullptr);
void addArrayAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const void* value, const size_t arrayElemCount);
void* getArrayC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getAttributeRdC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeWrC(const PathC& path, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getOrCreateAttributeWrC(const PathC& path, const TokenC& attrName, TypeC type);

// Type safe interface
template <typename T>
void addAttribute(const PathC& path, const TokenC& attrName, TypeC type, const T& value);
template <typename T>
void addSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const T& value);
template <typename T>
T* getArray(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getAttribute(const PathC& path, const TokenC& attrName);
template <typename T>
const T* getArrayRd(const Bucket& bucket, const TokenC& attrName);
template <typename T>
const T* getArrayRd(BucketId bucketId, const TokenC& attrName);
template <typename T>
const T* getAttributeRd(const PathC& path, const TokenC& attrName);
template <typename T>
T* getArrayWr(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getAttributeWr(const PathC& path, const TokenC& attrName);
template <typename T>
T* getAttributeWr(const PathC& path, const TokenC& attrName, NameSuffix suffix);
void removeAttribute(const PathC& path, const TokenC& attrName);
void removeSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix);

/** @brief Destroy all attributes with matching names from prim at given path
 *
 * @param[in] path      Path to the prim holding the attribute
 * @param[in] attrNames Attribute name array
 */
void removeAttributesFromPath(const PathC& path, const std::vector<TokenC>& attrNames);

ValidMirrors getAttributeValidBits(
    const PathC& path,
    const TokenC& attrName,
    ArrayAttributeArray::MirroredArrays subArray = ArrayAttributeArray::MirroredArrays::Values) const;

// Accessors for element count of array attributes
size_t* getArrayAttributeSize(const PathC& path, const TokenC& attrName);
const size_t* getArrayAttributeSizeRd(const PathC& path, const TokenC& attrName);
size_t* getArrayAttributeSizeWr(const PathC& path, const TokenC& attrName);
size_t* getArrayAttributeSizes(const Bucket& bucket, const TokenC& attrName);
const size_t* getArrayAttributeSizesRd(const Bucket& bucket, const TokenC& attrName);
size_t* getArrayAttributeSizesWr(const Bucket& bucket, const TokenC& attrName);
SpanC setArrayAttributeSizeAndGet(PathC path, const TokenC& attrName, size_t newSize);
SpanC setArrayAttributeSizeAndGet(BucketId bucketId, size_t elementIndex, const TokenC& attrName, size_t newSize);

// GPU can currently read, but not write, size of arrays
// This is because writing causes array to resize, and that's not currently supported on GPU
const size_t* getArrayAttributeSizeRdGpu(const PathC& path, const TokenC& attrName);
const size_t* getArrayAttributeSizesRdGpu(const Bucket& bucket, const TokenC& attrName);

// Void* CUDA GPU interface
SpanC getAttributeGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
ConstSpanC getAttributeRdGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
SpanC getAttributeWrGpuC(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable, NameSuffix suffix = NameSuffix::none);
void* getArrayGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);

// Span CUDA GPU interface
SpanC getArraySpanGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArraySpanRdGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArraySpanWrGpuC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);

// Type safe CUDA GPU interface
template <typename T>
T* getAttributeGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
const T* getAttributeRdGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
T* getAttributeWrGpu(const PathC& path, const TokenC& attrName, PtrToPtrKind ptrToPtrKind = PtrToPtrKind::eNotApplicable);
template <typename T>
T* getArrayGpu(const Bucket& bucket, const TokenC& attrName);
template <typename T>
const T* getArrayRdGpu(const Bucket& bucket, const TokenC& attrName);
template <typename T>
T* getArrayWrGpu(const Bucket& bucket, const TokenC& attrName);

// D3D GPU interface
omni::gpucompute::GpuPointer getAttributeD3d(const PathC& path, const TokenC& attrName);
void* getArrayD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
const void* getArrayRdD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
void* getArrayWrD3d(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);

// PathC methods
void addPath(const PathC& path, const Bucket& bucket = {});
void renamePath(const PathC& oldPath, const PathC& newPath);
set<AttrNameAndType> getTypes(const PathC& path) const;
size_t getAttributeCount(const PathC& path) const;
TypeC getType(const PathC& path, const TokenC& attrName) const;
void removePath(const PathC& path);
size_t count(const PathC& path) const;
size_t count(const PathC& path, const TokenC& attrName) const;

// Type methods
void addType(TypeC type, Typeinfo typeInfo);
Typeinfo getTypeInfo(TypeC type) const;

// Bucket methods
BucketId addBucket(const Bucket& bucket);
void addAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type, const void* value = nullptr);
void addArrayAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount);
template <typename T>
void addAttribute(const Bucket& bucket, const TokenC& attrName, TypeC type, const T& value);
void removeAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type);
void printBucket(const Bucket& bucket) const;
void printBucketName(const Bucket& bucketTypes, BucketId bucketId) const;
void printBucketNames() const;
void printBucketNamesAndTypes() const;
flatcache::set<BucketId> findBuckets(const set<AttrNameAndType>& all,
                                     const set<AttrNameAndType>& any = {},
                                     const set<AttrNameAndType>& none = {}) const;
View getView(const set<AttrNameAndType>& inc, const set<AttrNameAndType>& exc = {});
size_t getElementCount(const Bucket& bucket) const;
const PathC* getPathArray(const Bucket& bucket) const;
TypeC getType(const Bucket& bucket, const TokenC& attrName) const;

/** @brief Destroy all attributes with matching names from a given Bucket - array version of removeAttributeC
 *
 * @param[in] bucket    Bucket to remove attributes from
 * @param[in] attrNames Attribute name array
 */
void removeAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames);

// BucketId methods
size_t getElementCount(BucketId bucketId) const;
SpanC getArrayC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArrayRdC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArrayWrC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getOrCreateArrayWrC(BucketId bucketId, const TokenC& attrName, TypeC type, NameSuffix suffix = NameSuffix::none);
SpanC getArrayGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
ConstSpanC getArrayRdGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
SpanC getArrayWrGpuC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix = NameSuffix::none);
BucketId getBucketId(const PathC& path) const;
ArrayPointersAndSizesC getArrayAttributeArrayWithSizes(BucketId bucketId, const TokenC& attrName);
ConstArrayPointersAndSizesC getArrayAttributeArrayWithSizesRd(BucketId bucketId, const TokenC& attrName);
ArrayPointersAndSizesC getArrayAttributeArrayWithSizesWr(BucketId bucketId, const TokenC& attrName);
SpanSizeC getArrayAttributeSizes(BucketId bucketId, const TokenC& attrName);
ConstSpanSizeC getArrayAttributeSizesRd(BucketId bucketId, const TokenC& attrName);
ConstSpanSizeC getArrayAttributeSizesRdGpu(BucketId bucketId, const TokenC& attrName);
SpanSizeC getArrayAttributeSizesWr(BucketId bucketId, const TokenC& attrName);
ConstPathCSpan getPathArray(BucketId bucketId) const;
Bucket getNamesAndTypes(BucketId bucketId) const;

// std::vector<Bucket> methods
std::vector<size_t> getElementCounts(const std::vector<Bucket>& buckets) const;
template <typename T>
std::vector<const T*> getArraysRd(const std::vector<Bucket>& buckets, const TokenC& attrName);
template <typename T>
std::vector<T*> getArraysWr(const std::vector<Bucket>& buckets, const TokenC& attrName);
template <typename T>
std::vector<T*> getArrays(const std::vector<Bucket>& buckets, const TokenC& attrName);

// BucketImpl methods
size_t getElementCount(const BucketImpl& bucketImpl) const;

// Allow default construction
PathToAttributesMap(const PlatformId& platformId = PlatformId::Global);
// Disallow copying
PathToAttributesMap(const PathToAttributesMap&) = delete;
// Allow copy assignment. This is used by StageWithHistory
PathToAttributesMap& operator=(const PathToAttributesMap&);
// Allow move construction and assignment
PathToAttributesMap(PathToAttributesMap&& other) noexcept = default;
PathToAttributesMap& operator=(PathToAttributesMap&& other) noexcept = default;
~PathToAttributesMap();

// Methods that are currently used in flatcache.cpp
// TODO: Make private
struct ArrayOfArrayInfo
{
    // For each array, the element count requested by the user
    size_t* arraySizeArray;
    // For each array, the element count allocated on the CPU
    MirroredArray* arrayCpuCapacityArray;
    // For each array, the element count allocated on the GPU
    MirroredArray* arrayGpuCapacityArray;
    // For each array, the GPU data
    MirroredArray* arrayGpuPtrArray;
};
struct ConstArrayOfArrayInfo
{
    // For each array, the element count requested by the user
    const MirroredArray * arraySizeArray;
    // For each array, the element count allocated on the CPU
    const MirroredArray* arrayCpuCapacityArray;
    // For each array, the element count allocated on the GPU
    const MirroredArray* arrayGpuCapacityArray;
    // For each array, the GPU data
    const MirroredArray* arrayGpuPtrArray;
};

// enableCpuWrite() is used in flatcache.cpp, so needs to be public
// TODO: move that code from there to here
void enableCpuWrite(MirroredArray& array,
                    const size_t* elemToArraySize,
                    MirroredArray* elemToArrayCpuCapacity,
                    MirroredArray* elemToArrayGpuCapacity,
                    MirroredArray* elemToArrayGpuData);

// getArrayOfArrayInfo() is used in flatcache.cpp, so needs to be public
// TODO: move that code from there to here
ArrayOfArrayInfo getArrayOfArrayInfo(Typeinfo typeInfo, BucketImpl& bucketImpl, TokenC attrName);
ArrayOfArrayInfo getArrayOfArrayInfo(ArrayAttributeArray &arrayAttributeArray);
ConstArrayOfArrayInfo getArrayOfArrayInfo(Typeinfo typeInfo, const BucketImpl& bucketImpl, TokenC attrName) const;
ConstArrayOfArrayInfo getArrayOfArrayInfo(const ArrayAttributeArray &arrayAttributeArray) const;

void bucketImplCopyScalarAttributeArray(ScalarAttributeArray &dest, const ScalarAttributeArray &src);
void bucketImplCopyArrayAttributeArray(BucketImpl& destBucketImpl, const AttrName& destName, ArrayAttributeArray &dest, const ArrayAttributeArray &src);
void bucketImplCopyArrays(BucketImpl& destBucketImpl,
                          BucketId destBucketId,
                          const BucketImpl& srcBucketImpl,
                          BucketId srcBucketId,
                          const carb::flatcache::set<AttrNameAndType>& attrFilter = {});

// Serialization
struct Serializer
{
    uint8_t *p;
    uint8_t *buf;
    uint8_t *end;
    uint64_t bytesWritten; // increments even if attempts are made to write past end
    bool overflowed;
    void init(uint8_t *const _buf, uint8_t *const end);
    bool writeBytes(const uint8_t *const src, uint64_t size);
    bool writeString(const char* const s, const size_t len);
    bool writeString(const std::string &s);
    template<typename T>
    bool write(const T &t);
};
struct Deserializer
{
    const uint8_t *p;
    const uint8_t *buf;
    const uint8_t *end;
    uint64_t bytesRead; // increments even if attempts are made to read past end
    bool overflowed;
    void init(const uint8_t *const _buf, const uint8_t *const end);
    bool readBytes(uint8_t *const dst, uint64_t size);
    bool readString(std::string &s);
    template<typename T>
    bool read(T &t);
};
uint64_t serializeScalarAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out);
bool deserializeScalarAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in);
uint64_t serializeArrayAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ArrayAttributeArray& srcArrayAttributeArray, Serializer &out);
bool deserializeArrayAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in);

// Change tracking: fetch / discard the pending change list for a listener
PrimBucketListImpl getChanges(ListenerId listener);
void popChanges(ListenerId listener);

private:
// Device is used by getArrayC
enum class Device
{
    eCPU = 0,
    eCudaGPU = 1,
    eD3dVkGPU = 2
};

// Sentinel ArrayOfArrayInfo used for scalar (non array-of-array) attributes
static inline constexpr ArrayOfArrayInfo ScalarArrayOfArrayInfo()
{
    return ArrayOfArrayInfo{ nullptr, nullptr, nullptr, nullptr };
}

// TODO: Now that EnableReadFn and EnableWriteFn can have the same type, should they just be one alias?
using EnableReadFn = void (PathToAttributesMap::*)(PathToAttributesMap::MirroredArray& array,
                                                   const size_t* elemToArraySize,
                                                   PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
                                                   PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
                                                   PathToAttributesMap::MirroredArray* elemToArrayGpuData);
using EnableWriteFn = void (PathToAttributesMap::*)(PathToAttributesMap::MirroredArray& array,
                                                    const size_t* elemToArraySize,
                                                    PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
                                                    PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
                                                    PathToAttributesMap::MirroredArray* elemToArrayGpuData);

// Bundle of mirror-enable callbacks and device selection used by getArraySpanC
struct IOConfig
{
    EnableReadFn enableRead;
    EnableWriteFn enableWrite;
    EnableReadFn enableRdPtrForWrite;
    Device device;
    PtrToPtrKind ptrToPtrKind;
    // Fluent setters returning *this for chaining
    inline IOConfig& withEnableRead(EnableReadFn _enableRead)
    {
        enableRead = _enableRead;
        return *this;
    }
    inline IOConfig& withEnableWrite(EnableWriteFn _enableWrite)
    {
        enableWrite = _enableWrite;
        return *this;
    }
    inline IOConfig& withEnableRdPtrForWrite(EnableReadFn _enableRdPtrForWrite)
    {
        enableRdPtrForWrite = _enableRdPtrForWrite;
        return *this;
    }
    inline IOConfig& withDevice(Device _device)
    {
        device = _device;
        return *this;
    }
    inline IOConfig& withPtrToPtrKind(PtrToPtrKind _ptrToPtrKind)
    {
        ptrToPtrKind = _ptrToPtrKind;
        return *this;
    }
};

void serializeMirroredArrayMetadata(const AttrName& srcName, MirroredArray &srcValuesArray, Serializer &out);
template<typename ArraysT, typename ArraysMapT>
void deserializeMirroredArrayMetadata(Platform& platform, ArraysMapT& arraysMap, AttrName &destName, Typeinfo *&typeInfo, ArraysT *&destArray, Deserializer &in);

BucketImpl& addAttributeInternal(BucketImpl& prevBucketImpl, const Bucket& bucket, const TokenC& attrName, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount);
void fillAttributeInternal(BucketImpl& bucketImpl, const AttrName& name, const size_t startIndex, const size_t endIndex, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount, MirroredArray *const valuesArray, ArrayAttributeArray *const arrayAttributeArray);
void addAttributeInternal(const PathC& path, const TokenC& attrNameC, const NameSuffix nameSuffix, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount);
bool findArrayAttributeArrayForPath(const PathC& path, const TokenC& attrName, size_t& outElementIndex, BucketImpl*& outBucketImpl, ArrayAttributeArray*& outArrayAttributeArray);
bool findArrayAttributeArrayForBucketId(const BucketId bucketId, const TokenC& attrName, BucketImpl*& outBucketImpl, ArrayAttributeArray*& outArrayAttributeArray);
void allocElement(ScalarAttributeArray &scalar);
void allocElement(ArrayAttributeArray &vector);
size_t allocElement(BucketImpl& bucketImpl);
void allocElementForMove(BucketImpl& srcBucketImpl, const ArrayOfArrayInfo &srcAoaInfo, const AttrName& name, MirroredArray &destArray, MirroredArray *const srcArray);
size_t allocElementForMove(BucketImpl& destBucketImpl, BucketImpl& srcBucketImpl, const PathC& path);
void addElementToTrackers(size_t elemIndex, BucketImpl& bucketImpl);
void makeSrcValidIfDestValid(MirroredArray& srcArray, BucketImpl& srcBucketImpl, const ArrayOfArrayInfo& srcAoaInfo, const MirroredArray& destArray, const AttrName& name);
void moveElementBetweenBuckets(const PathC& path, BucketId destBucketId, BucketId srcBucketId, const Bucket& destBucket);
void moveElementScalarData(ScalarAttributeArray &destArray, const size_t destElemIndex, const ScalarAttributeArray &srcArray, const size_t srcElemIndex);
void moveElementArrayData(ArrayAttributeArray &destArray, const size_t destElemIndex, const ArrayAttributeArray &srcArray, const size_t srcElemIndex);
void moveElement(BucketImpl& destBucket, size_t destElemIndex, BucketImpl& srcBucket, size_t srcElemIndex);
void destroyElement(BucketId bucketId, size_t elemIndex, bool destroyDataPointedTo);
ArrayAndDirtyIndices getArraySpanC(BucketId bucketId, TokenC attrName, const IOConfig &io, NameSuffix suffix = NameSuffix::none);
ArrayAndDirtyIndices getArraySpanC(MirroredArray& array, const AttrName& name, const ArrayOfArrayInfo& aoa, BucketImpl& bucketImpl, const IOConfig &io);

// Mirror-enable helpers: make the requested mirror (CPU/CUDA/D3D) of a
// MirroredArray valid for reading and/or writing
void enableCpuReadImpl(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* elemToArrayGpuData, bool printWarnings = true);
void enableCpuRead(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* elemToArrayGpuData);
void enableCpuReadIfValid(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* elemToArrayGpuData);
void enableGpuRead(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* elemToArrayGpuData);
void enableGpuWrite(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* elemToArrayGpuData);
void enableD3dGpuRead(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* gpuPointerArray);
void enableD3dGpuWrite(MirroredArray& array, const size_t* elemToArraySize, MirroredArray* elemToArrayCpuCapacity, MirroredArray* elemToArrayGpuCapacity, MirroredArray* elemToArrayGpuData);

// Pre-built IOConfigs for each device / access-mode combination
static inline constexpr IOConfig CpuReadConfig()
{
    return IOConfig{
        &PathToAttributesMap::enableCpuRead, // enableRead
        nullptr, // enableWrite
        nullptr, // enableRdPtrForWrite
        Device::eCPU, // device
        PtrToPtrKind::eNotApplicable // ptrToPtrKind
    };
};
static inline constexpr IOConfig CpuWriteConfig()
{
    return IOConfig{
        nullptr, // enableRead
        &PathToAttributesMap::enableCpuWrite, // enableWrite
        &PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
        Device::eCPU, // device
        PtrToPtrKind::eNotApplicable // ptrToPtrKind
    };
};
static inline constexpr IOConfig CpuReadWriteConfig()
{
    return IOConfig{
        &PathToAttributesMap::enableCpuRead, // enableRead
        &PathToAttributesMap::enableCpuWrite, // enableWrite
        &PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
        Device::eCPU, // device
        PtrToPtrKind::eNotApplicable // ptrToPtrKind
    };
};
// TODO: This probably needs to go away, it only exists to turn off "printWarnings"
static inline constexpr IOConfig CpuReadIfValidWriteConfig()
{
    return IOConfig{
        &PathToAttributesMap::enableCpuReadIfValid, // enableRead
        &PathToAttributesMap::enableCpuWrite, // enableWrite
        &PathToAttributesMap::enableCpuRead, // enableRdPtrForWrite
        Device::eCPU, // device
        PtrToPtrKind::eNotApplicable // ptrToPtrKind
    };
};
static inline constexpr IOConfig CudaReadConfig()
{
    return IOConfig{
        &PathToAttributesMap::enableGpuRead, // enableRead
        nullptr, // enableWrite
        nullptr, // enableRdPtrForWrite
        Device::eCudaGPU, // device
        PtrToPtrKind::eNotApplicable // ptrToPtrKind
    };
};
static inline constexpr IOConfig CudaWriteConfig() { return IOConfig{ nullptr, // enableRead &PathToAttributesMap::enableGpuWrite, // enableWrite &PathToAttributesMap::enableGpuRead, // enableRdPtrForWrite Device::eCudaGPU, // device PtrToPtrKind::eNotApplicable // ptrToPtrKind }; }; static inline constexpr IOConfig CudaReadWriteConfig() { return IOConfig{ &PathToAttributesMap::enableGpuRead, // enableRead &PathToAttributesMap::enableGpuWrite, // enableWrite &PathToAttributesMap::enableGpuRead, // enableRdPtrForWrite Device::eCudaGPU, // device PtrToPtrKind::eNotApplicable // ptrToPtrKind }; }; static inline constexpr IOConfig D3dVkReadConfig() { return IOConfig{ &PathToAttributesMap::enableD3dGpuRead, // enableRead nullptr, // enableWrite nullptr, // enableRdPtrForWrite Device::eD3dVkGPU, // device PtrToPtrKind::eNotApplicable // ptrToPtrKind }; }; static inline constexpr IOConfig D3dVkWriteConfig() { return IOConfig{ nullptr, // enableRead &PathToAttributesMap::enableD3dGpuWrite, // enableWrite &PathToAttributesMap::enableD3dGpuRead, // enableRdPtrForWrite Device::eD3dVkGPU, // device PtrToPtrKind::eNotApplicable // ptrToPtrKind }; }; static inline constexpr IOConfig D3dVkReadWriteConfig() { return IOConfig{ &PathToAttributesMap::enableD3dGpuRead, // enableRead &PathToAttributesMap::enableD3dGpuWrite, // enableWrite &PathToAttributesMap::enableD3dGpuRead, // enableRdPtrForWrite Device::eD3dVkGPU, // device PtrToPtrKind::eNotApplicable // ptrToPtrKind }; }; std::tuple<bool, BucketId, size_t> getPresentAndBucketAndElement(const PathC& path) const; SpanC getArrayElementPtr(SpanC array, size_t bucketElement) const; ConstSpanC getArrayElementPtr(ConstSpanC array, size_t bucketElement) const; void destructiveResizeIfNecessary(uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount); void destructiveResizeIfNecessaryGPU(MirroredArray& gpuPointerArray, size_t elem, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, 
omni::gpucompute::GpuCompute* computeAPI, omni::gpucompute::Context* computeCtx); void resizeIfNecessary(uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, TypeC type); void resizeIfNecessaryGPU(MirroredArray& gpuPointerArray, size_t elem, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, omni::gpucompute::GpuCompute* computeAPI, omni::gpucompute::Context* computeCtx); void allocGpuMemIfNecessary(PathToAttributesMap::MirroredArray& array, size_t byteCount, size_t elemSize, omni::gpucompute::GpuCompute* computeAPI, omni::gpucompute::Context* computeCtx); std::pair<BucketId, BucketImpl&> findOrCreateBucket(const Bucket& bucket); void eraseBucket(const Bucket& bucket); BucketId findBucketId(const Bucket& bucket); std::tuple<BucketId, ArrayIndex> getBucketAndArrayIndex(const PathC& path) const; std::tuple<BucketId, ArrayIndex> addAttributeGetBucketAndArrayIndex( const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type); void addAttributesToBucket( const PathC& path, const std::vector<TokenC>& attrNames, const std::vector<TypeC>& typeCs); void setArrayDirty(ArrayAndDirtyIndices& arrayAndDirtyIndices); void setArrayElementDirty(ArrayAndDirtyIndices& arrayAndDirtyIndices, size_t elemIndex); BucketImpl& addAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value = nullptr); BucketImpl& addArrayAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount); void checkInvariants(); /** * @brief Internal debug function * * @details This loops over all the array attributes checking that the * invariants hold true. 
Currently it just enforces
 * (I1) If the size and cpu size match that the cpu array pointer isn't null
 *
 *
 * @return none
 */
bool __validateArrayInvariants() const;
};

// PathToAttributesMap doesn't depend on USD for tokens or paths
// However, it's useful to be able to see the USD text representation of tokens and
// paths when debugging. Set ENABLE_USD_DEBUGGING to 1 to enable that.
// Then use toTfToken() to convert TokenC to pxr::TfToken

// Reinterpret a TokenC as the pxr::TfToken it wraps (debug convenience; no copy).
inline const pxr::TfToken& toTfToken(const TokenC& token)
{
    return reinterpret_cast<const pxr::TfToken&>(token);
}

// Reinterpret a PathC as the pxr::SdfPath it wraps (debug convenience; no copy).
inline const pxr::SdfPath& toSdfPath(const PathC& path)
{
    return reinterpret_cast<const pxr::SdfPath&>(path);
}

// Query result
struct View
{
    PathToAttributesMap* path2attrsMap;   // the map the query was run against
    std::vector<Bucket> buckets;          // buckets matched by the query
    std::vector<size_t> bucketElemCounts; // element count of each matched bucket
};

using BucketImpl = PathToAttributesMap::BucketImpl;

// The rest of this file is methods

// Returns the number of Paths known to the cache
inline size_t PathToAttributesMap::size()
{
    return pathToBucketElem.size();
}

// Delete all data in the cache, meaning all the buckets and the map
// from paths to buckets.
inline void PathToAttributesMap::clear()
{
    buckets.clear();
    pathToBucketElem.clear();
    attrNameSetToBucketId.clear();
}

// Invoke callback(bucketId, bucketImpl) for every slot currently holding a live
// bucket; empty slots (e.g. from erased buckets) are skipped.
template<typename CallbackT>
inline void PathToAttributesMap::BucketIdToImpl::forEachValidBucket(CallbackT callback) const
{
    BucketId id{ 0 };
    for (size_t i = 0; i < buckets.size(); ++i, ++id)
    {
        if (buckets[i])
        {
            callback(id, *buckets[i]);
        }
    }
}

// Debug helper: dump one mirrored attribute array to stdout, including which
// mirrors (CPU/GPU/USD) are currently valid. When `arrayElemCount` is non-null,
// each element is treated as a pointer to an array of `arrayElemCount[elem]`
// entries and those entries are printed individually.
inline void PathToAttributesMap::printMirroredArray(const char* const label, const ScalarAttributeArray &array, const size_t* const arrayElemCount) const
{
    // Print one value, choosing an interpretation by its byte size; anything
    // larger than 8 bytes falls through to a 16-bytes-per-row hex dump.
    auto printValue = [](const uint8_t *const data, const size_t size)
    {
        if (!data)
        {
            printf("<nullptr>");
        }
        else
        {
            if (size <= sizeof(uint8_t))
            {
                printf("u8=%u, d8=%d, c=%c", *data, *(const int8_t*)data, *(const char*)data);
            }
            else if (size <= sizeof(uint16_t))
            {
                printf("u16=%u, d16=%d", *(const uint16_t*)data, *(const int16_t*)data);
            }
            else if (size <= sizeof(uint32_t))
            {
                printf("u32=%u, d32=%d, float=%f", *(const uint32_t*)data, *(const int32_t*)data, *(const float*)data);
            }
            else if (size <= sizeof(uint64_t))
            {
                printf("u64=%" PRIu64 ", d64=%" PRId64 ", double=%f, ptr=0x%p", *(const uint64_t*)data, *(const int64_t*)data, *(const double*)data, *(void**)data);
            }
            else
            {
                printf("\n");
                for (size_t i = 0; i < size; i += 16)
                {
                    printf(" %06zx: ", i);
                    for (size_t j = 0; j < 16; ++j)
                    {
                        if (i + j < size)
                        {
                            printf("%02x ", data[i + j]);
                        }
                        else
                        {
                            printf(" ");
                        }
                    }
                    printf(" ");
                    for (size_t j = 0; j < 16; j++)
                    {
                        if (i + j < size)
                        {
                            printf("%c", isprint(data[i + j]) ? data[i + j] : '.');
                        }
                    }
                    printf("\n");
                }
            }
        }
    };

    printf(" %s (type %d)[count %zu]:\n", label, array.type.type, array.count);
    const Typeinfo &typeinfo = array.typeinfo;
    const size_t elemSize = typeinfo.size;
    printf(" cpuValid=%d 0x%p\n", array.cpuValid, array.cpuData());
    if (array.cpuValid)
    {
        for (size_t elem = 0; elem < array.count; ++elem)
        {
            printf(" [%5zu]: ", elem);
            const uint8_t *const elemData = array.cpuData() + elem * elemSize;
            printf("0x%p ", elemData);
            if (arrayElemCount)
            {
                // Array-valued attribute: elemData holds a pointer to the payload.
                CARB_ASSERT(typeinfo.isArray);
                const uint8_t* const base = *((const uint8_t **)elemData);
                printf(" => 0x%p", base);
                for (size_t i = 0; i < arrayElemCount[elem]; ++i)
                {
                    printf("\n [%5zu]: ", i);
                    const uint8_t* const arrayData = base ? base + i * typeinfo.arrayElemSize : nullptr;
                    printValue(arrayData, typeinfo.arrayElemSize);
                }
            }
            else
            {
                printValue(elemData, elemSize);
            }
            printf("\n");
        }
    }
    printf(" gpuValid=%d 0x%p\n", array.gpuValid, array.gpuArray);
    printf(" usdValid=%d\n", array.usdValid);
}

// Print the cache, specifically the Paths and the UsdAttributes they map to
// (but not the values of the attributes currently)
inline void PathToAttributesMap::print() const
{
    // printf-style formatting into a shared static buffer (debug-only helper;
    // not thread-safe, and each call overwrites the previous result).
    auto va = [](auto ...params) -> const char*
    {
        static char tmp[1024];
#ifdef _WIN32
        _snprintf_s(tmp, sizeof(tmp), params...);
#else
        snprintf(tmp, sizeof(tmp), params...);
#endif
        return (const char*)&tmp;
    };
    std::cout << "(== PathToAttributesMap::print() begin ==)\n";
    buckets.forEachValidBucket([this, va](const BucketId bucketId, const BucketImpl& bucketImpl) {
        printf("bucket [%zu]:\n", size_t(bucketId));
        if (!bucketImpl.elemToPath.size())
        {
            printf(" <no elements>\n");
        }
        else
        {
            for (size_t elem = 0; elem < bucketImpl.elemToPath.size(); ++elem)
            {
                printf(" elem [%5zu]: \"%s\"\n", elem, bucketImpl.elemToPath[elem].GetText());
            }
        }
        if (bucketImpl.scalarAttributeArrays.empty() && bucketImpl.arrayAttributeArrays.empty())
        {
            printf(" <no attributes>\n");
        }
        else
        {
            bucketImpl.scalarAttributeArrays.forEach([this, &va](const AttrName& name, const ScalarAttributeArray &array) {
                printMirroredArray(va("%s \"%s\"", "sattr", toTfToken(name.name).GetText()), array, nullptr);
            });
            // Array-valued attributes dump all five backing arrays: the values
            // plus the bookkeeping arrays (sizes, capacities, GPU pointers).
            // NOTE(review): the per-element count passed for "values" is
            // cpuElemCounts (CPU capacities), not elemCounts (sizes) --
            // presumably to dump everything allocated on the CPU; confirm.
            bucketImpl.arrayAttributeArrays.forEach([this, &va](const AttrName& name, const ArrayAttributeArray &array) {
                printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "values"), array.values, (const size_t*)array.cpuElemCounts.cpuData());
                printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "elemCounts"), array.elemCounts, nullptr);
                printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "cpuElemCounts"), array.cpuElemCounts, nullptr);
                printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "gpuElemCounts"), array.gpuElemCounts, nullptr);
                printMirroredArray(va("%s \"%s\" %s", "vattr", toTfToken(name.name).GetText(), "gpuPtrs"), array.gpuPtrs, nullptr);
            });
        }
    });
    std::cout << "(== PathToAttributesMap::print() end ==)\n\n";
}

#define ENABLE_LOG 0

// Debug logging helper; compiles to a no-op unless ENABLE_LOG is 1.
inline void log(const char* format, ...)
{
#if ENABLE_LOG
    va_list args;
    va_start(args, format);
    vprintf(format, args);
    va_end(args);
#endif
}

// Register the Typeinfo (size, array-ness, ...) for a type so buckets can
// allocate storage for attributes of that type.
inline void PathToAttributesMap::addType(TypeC type, Typeinfo typeInfo)
{
    Typeinfo *v;
    typeToInfo.allocateEntry(type, &v);
    *v = typeInfo;
}

// Look up the Typeinfo previously registered with addType().
// Returns a default-constructed Typeinfo if the type is unknown.
inline Typeinfo PathToAttributesMap::getTypeInfo(TypeC type) const
{
    const Typeinfo* typeinfo;
    if (typeToInfo.find(type, &typeinfo))
    {
        return *typeinfo;
    }
    else
    {
        return Typeinfo();
    }
}

// Find the bucket with exactly this attribute set, creating it (and one backing
// array per attribute) if it doesn't exist yet. Returns the bucket's id.
inline BucketId PathToAttributesMap::addBucket(const Bucket& bucket)
{
    auto iter = attrNameSetToBucketId.find(bucket);
    bool found = (iter != attrNameSetToBucketId.end());
    if (!found)
    {
        // Create bucket
        auto bucketIdAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
        auto bucketAndId = attrNameSetToBucketId.emplace(bucket, bucketIdAndImpl.first);
        const Bucket& addedBucket = bucketAndId.first->first;
        BucketImpl& bucketImpl = bucketIdAndImpl.second;
        bucketImpl.SetBucket(bucket);
        // Make array for each type
        for (const AttrNameAndType& b : addedBucket)
        {
            const Type attrType = b.type;
            const Token& attrName = b.name;
            const NameSuffix& suffix = b.suffix;
            const TypeC attrTypeC = TypeC(attrType);
            const Typeinfo* typeinfo;
            if (typeToInfo.find(attrTypeC, &typeinfo))
            {
                AttrName name{ attrName, suffix };
                // Placement-new into the slot handed out by allocateEntry.
                if (typeinfo->isArray)
                {
                    ArrayAttributeArray* ptr;
                    bucketImpl.arrayAttributeArrays.allocateEntry(std::move(name), &ptr);
                    new (ptr) ArrayAttributeArray(platform, attrTypeC, *typeinfo);
                }
                else
                {
                    ScalarAttributeArray* ptr;
                    bucketImpl.scalarAttributeArrays.allocateEntry(std::move(name), &ptr);
                    new (ptr) ScalarAttributeArray(platform, attrTypeC, *typeinfo);
                }
            }
            else
            {
                // NOTE(review): other error paths in this file use CARB_LOG_ERROR;
                // std::cout is used here, presumably because Type is streamed via
                // operator<< -- confirm before changing.
                std::cout << "Error: Typeinfo for " << attrType << " not found. Please add it using addType()." << std::endl;
            }
        }
        return bucketIdAndImpl.first;
    }
    else
    {
        return iter->second;
    }
}

// Multiple attribute methods

// Batch read: attrsOut[i] receives a read-only CPU pointer to attribute
// attrNames[i] of prim paths[i].
inline void PathToAttributesMap::getAttributesRdC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount)
{
    // TODO: make optimized version instead of calling getAttributeRdC
    for (size_t i = 0; i != attrCount; i++)
    {
        attrsOut[i] = getAttributeRdC(paths[i], attrNames[i]).ptr;
    }
}

// Batch read of GPU pointers (see getAttributesRdC).
inline void PathToAttributesMap::getAttributesRdGpuC(const void** attrsOut, const PathC* paths, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind)
{
    // TODO: make optimized version instead of calling getAttributeRdGpuC
    for (size_t i = 0; i != attrCount; i++)
    {
        attrsOut[i] = getAttributeRdGpuC(paths[i], attrNames[i], ptrToPtrKind).ptr;
    }
}

// Batch read of whole per-bucket attribute arrays, one per attribute name.
inline void PathToAttributesMap::getArraysRdC(const void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount)
{
    // TODO: make optimized version instead of calling getArrayRdC
    for (size_t i = 0; i != attrCount; i++)
    {
        attrsOut[i] = getArrayRdC(bucket, attrNames[i]);
    }
}

// Batch write: writable CPU pointers for several attributes of one prim.
inline void PathToAttributesMap::getAttributesWrC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount)
{
    // TODO: make optimized version instead of calling getAttributeWrC
    for (size_t i = 0; i != attrCount; i++)
    {
        attrsOut[i] = getAttributeWrC(path, attrNames[i]).ptr;
    }
}

// Batch write of GPU pointers for several attributes of one prim.
inline void PathToAttributesMap::getAttributesWrGpuC(void** attrsOut, const PathC& path, const TokenC* attrNames, size_t attrCount, PtrToPtrKind ptrToPtrKind)
{
    // TODO: make optimized version instead of calling getAttributeWrGpuC
    for (size_t i = 0; i != attrCount; i++)
    {
        attrsOut[i] = getAttributeWrGpuC(path, attrNames[i], ptrToPtrKind).ptr;
    }
}

// Batch write of whole per-bucket attribute arrays, one per attribute name.
inline void PathToAttributesMap::getArraysWrC(void** attrsOut, const Bucket& bucket, const TokenC* attrNames, size_t attrCount)
{
    // TODO: make optimized version instead of calling getArrayWrC
    for (size_t i = 0; i != attrCount; i++)
    {
        attrsOut[i] = getArrayWrC(bucket,
attrNames[i]);
    }
}

// Algorithm:
// Check whether bucket already has a bucketId
// If it does:
//     Check whether bucketId has a bucketImpl
//     If it does:
//         return (bucketId, bucketImpl)
//     Else:
//         Print error message
//         return (bucketId, empty bucketImpl)
// Else:
//     Allocate a bucketId
//     attrNameSetToBucketId += (bucket->bucketId)
//     buckets += (bucketId->empty bucketImpl)
//
inline std::pair<BucketId, BucketImpl&> PathToAttributesMap::findOrCreateBucket(const Bucket& bucket)
{
    auto iter = attrNameSetToBucketId.find(bucket);
    bool foundBucketAndId = (iter != attrNameSetToBucketId.end());
    BucketId bucketId;
    if (foundBucketAndId)
    {
        bucketId = iter->second;
        auto implPtr = buckets.find(bucketId);
        if (implPtr)
        {
            return { bucketId, *implPtr };
        }
        else
        {
            // This is an error, but make an impl so that we can return gracefully
            CARB_LOG_ERROR("BucketId->impl not found");
            // Allocate an impl and id->impl mapping and then set the
            // attrNameSetToBucketId to the slot of the new impl
            auto idAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
            iter->second = idAndImpl.first;
            idAndImpl.second.SetBucket(bucket);
            return idAndImpl;
        }
    }
    // Allocate an impl and place in vector
    auto idAndImpl = buckets.emplace(platform.allocator.new_<BucketImpl>(platform));
    // Store bucket->Id mapping
    attrNameSetToBucketId.emplace(bucket, idAndImpl.first);
    idAndImpl.second.SetBucket(bucket);
    return idAndImpl;
}

// Remove the bucket with exactly this attribute set (if present), releasing its
// storage and the bucket->id mapping.
inline void PathToAttributesMap::eraseBucket(const Bucket& bucket)
{
    auto iter = attrNameSetToBucketId.find(bucket);
    bool foundBucketAndId = (iter != attrNameSetToBucketId.end());
    BucketId bucketId;
    if (foundBucketAndId)
    {
        bucketId = iter->second;
        auto implPtr = buckets.find(bucketId);
        if (implPtr)
        {
            buckets.erase(bucketId);
        }
        else
        {
            CARB_LOG_ERROR("BucketId->impl not found");
        }
        attrNameSetToBucketId.erase(bucket);
    }
    else
    {
        // Nothing to do
    }
}

// Add an attribute to all elements of a bucket
// Note that this might cause a merge with an existing bucket
//
// Here are the maps we have to update:
//     pathToBucketElem :: path -> (bucketId, arrayIndex)
//     buckets :: bucketId -> bucketImpl
//     attrNameSetToBucketId :: bucket-> bucketId
//
inline BucketImpl& PathToAttributesMap::addAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value)
{
    // TODO: should we warn on missing type?
    const Typeinfo& typeinfo = getTypeInfo(ctype);
    if (typeinfo.isArray && value)
    {
        CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
        return bucketImpl;
    }
    return addAttributeInternal(bucketImpl, bucket, attrName, ctype, value, typeinfo, 0);
}

// As addAttributeC, but for array-valued attributes whose initial payload is
// `arrayElemCount` elements read from `value`.
inline BucketImpl& PathToAttributesMap::addArrayAttributeC(BucketImpl& bucketImpl, const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount)
{
    CARB_ASSERT(!arrayElemCount || value);
    // TODO: should we warn on missing type?
    const Typeinfo& typeinfo = getTypeInfo(ctype);
    return addAttributeInternal(bucketImpl, bucket, attrName, ctype, value, typeinfo, arrayElemCount);
}

// Add an attribute to all elements of a bucket
// (overload keyed by attribute set; silently does nothing if the bucket doesn't exist).
inline void PathToAttributesMap::addAttributeC(
    const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value)
{
    const auto iter = attrNameSetToBucketId.find(bucket);
    if (iter != attrNameSetToBucketId.end())
    {
        const BucketId bucketId = iter->second;
        BucketImpl *const implPtr = buckets.find(bucketId);
        if (implPtr)
        {
            // TODO: should we warn on missing type?
            const Typeinfo& typeinfo = getTypeInfo(ctype);
            if (typeinfo.isArray && value)
            {
                CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
                return;
            }
            addAttributeInternal(*implPtr, bucket, attrName, ctype, value, typeinfo, 0);
        }
    }
}

// Array-valued variant of the bucket-keyed addAttributeC overload.
inline void PathToAttributesMap::addArrayAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC ctype, const void* value, const size_t arrayElemCount)
{
    CARB_ASSERT(!arrayElemCount || value);
    const auto iter = attrNameSetToBucketId.find(bucket);
    if (iter != attrNameSetToBucketId.end())
    {
        const BucketId bucketId = iter->second;
        BucketImpl *const implPtr = buckets.find(bucketId);
        if (implPtr)
        {
            // TODO: should we warn on missing type?
            const Typeinfo& typeinfo = getTypeInfo(ctype);
            addAttributeInternal(*implPtr, bucket, attrName, ctype, value, typeinfo, arrayElemCount);
        }
    }
}

// Typed convenience wrapper over addAttributeC.
template <typename T>
void PathToAttributesMap::addAttribute(
    const Bucket& bucket, const TokenC& attrName, TypeC type, const T& value)
{
    APILOGGER("addAttribute", apiLogEnabled, attrName);
    // TODO: check that type is compatible
    return addAttributeC(bucket, attrName, type, &value);
}

// Number of prims (elements) in a bucket -- by impl, by id, and by attribute set.
// The id/attribute-set overloads return 0 for unknown buckets.
inline size_t PathToAttributesMap::getElementCount(const BucketImpl& bucketImpl) const
{
    return bucketImpl.elemToPath.size();
}

inline size_t PathToAttributesMap::getElementCount(BucketId bucketId) const
{
    const auto implPtr = buckets.find(bucketId);
    if (implPtr)
    {
        return implPtr->elemToPath.size();
    }
    return 0;
}

inline size_t PathToAttributesMap::getElementCount(const Bucket& bucket) const
{
    const auto iter = attrNameSetToBucketId.find(bucket);
    if (iter != attrNameSetToBucketId.end())
    {
        BucketId bucketId = iter->second;
        return getElementCount(bucketId);
    }
    return 0;
}

// For an array-valued attribute, gather pointers to its bookkeeping arrays
// (sizes, CPU/GPU capacities, GPU pointers); all-null if the attribute is not
// array-valued or not present in the bucket.
inline PathToAttributesMap::ArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(Typeinfo typeInfo, BucketImpl& bucketImpl, TokenC attrName)
{
    if (typeInfo.isArray)
    {
        ArrayAttributeArray *array;
        if (bucketImpl.arrayAttributeArrays.find(AttrName{ attrName, NameSuffix::none }, &array))
        {
            return getArrayOfArrayInfo(*array);
        }
    }
    return { nullptr, nullptr, nullptr, nullptr };
}

inline
PathToAttributesMap::ArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(ArrayAttributeArray &arrayAttributeArray) { MirroredArray *const arraySizeArray = &arrayAttributeArray.elemCounts; MirroredArray *const arrayCpuCapacityArray = &arrayAttributeArray.cpuElemCounts; MirroredArray *const arrayGpuCapacityArray = &arrayAttributeArray.gpuElemCounts; MirroredArray *const arrayGpuPtrArray = &arrayAttributeArray.gpuPtrs; return { (size_t*)arraySizeArray->cpuData(), arrayCpuCapacityArray, arrayGpuCapacityArray, arrayGpuPtrArray }; } inline PathToAttributesMap::ConstArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(Typeinfo typeInfo, const BucketImpl& bucketImpl, TokenC attrName) const { if (typeInfo.isArray) { const ArrayAttributeArray *array; if (bucketImpl.arrayAttributeArrays.find(AttrName{ attrName, NameSuffix::none }, &array)) { return getArrayOfArrayInfo(*array); } } return { nullptr, nullptr, nullptr, nullptr }; } inline PathToAttributesMap::ConstArrayOfArrayInfo PathToAttributesMap::getArrayOfArrayInfo(const ArrayAttributeArray &arrayAttributeArray) const { const MirroredArray *const arraySizeArray = &arrayAttributeArray.elemCounts; const MirroredArray *const arrayCpuCapacityArray = &arrayAttributeArray.cpuElemCounts; const MirroredArray *const arrayGpuCapacityArray = &arrayAttributeArray.gpuElemCounts; const MirroredArray *const arrayGpuPtrArray = &arrayAttributeArray.gpuPtrs; return { arraySizeArray, arrayCpuCapacityArray, arrayGpuCapacityArray, arrayGpuPtrArray }; } inline std::vector<size_t> PathToAttributesMap::getElementCounts(const std::vector<Bucket>& buckets) const { size_t bucketCount = buckets.size(); std::vector<size_t> retval(bucketCount); for (size_t i = 0; i != bucketCount; i++) { retval[i] = getElementCount(buckets[i]); } return retval; } inline void PathToAttributesMap::addElementToTrackers(size_t elemIndex, BucketImpl& bucketImpl) { // Update change trackers // We allocate them lazily, so we have to iterate over 
listenerIdToChangeTrackerConfig // then allocate bucketImpl.listenerIdToChanges if necessary listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &elemIndex](ListenerId& listenerId, ChangeTrackerConfig& config) { if (config.changeTrackingEnabled) { // Allocate changes if necessary Changes* changes; if (bucketImpl.listenerIdToChanges.allocateEntry(listenerId, &changes)) { new (changes) Changes(); } changes->addNewPrim(elemIndex); } }); } inline void PathToAttributesMap::allocElement(ScalarAttributeArray &scalar) { const size_t allocSize = scalar.typeinfo.size; const size_t newSize = scalar.size() + allocSize; scalar.resize(newSize); // Only resize GPU mirror if it was previously allocated if (scalar.gpuCapacity != 0) { scalar.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, newSize, allocSize); } scalar.count++; } inline void PathToAttributesMap::allocElement(ArrayAttributeArray &arrayAttributeArray) { static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size"); allocElement(arrayAttributeArray.values); allocElement(arrayAttributeArray.elemCounts); allocElement(arrayAttributeArray.cpuElemCounts); allocElement(arrayAttributeArray.gpuElemCounts); allocElement(arrayAttributeArray.gpuPtrs); // For array-valued attributes, initialize CPU and GPU element counts static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size"); reinterpret_cast<size_t*>(arrayAttributeArray.elemCounts.cpuData())[arrayAttributeArray.elemCounts.count-1] = 0; arrayAttributeArray.elemCounts.cpuValid = true; reinterpret_cast<size_t*>(arrayAttributeArray.cpuElemCounts.cpuData())[arrayAttributeArray.cpuElemCounts.count - 1] = 0; arrayAttributeArray.cpuElemCounts.cpuValid = true; reinterpret_cast<size_t*>(arrayAttributeArray.gpuElemCounts.cpuData())[arrayAttributeArray.gpuElemCounts.count - 1] = 0; arrayAttributeArray.gpuElemCounts.cpuValid = true; } inline size_t 
PathToAttributesMap::allocElement(BucketImpl& bucketImpl) { // I moved this here to support old-style ArrayBase::resize // TODO: Now that ArrayBase is gone, check whether we can move it back const size_t element = bucketImpl.elemToPath.size(); bucketImpl.elemToPath.emplace_back(); // Allocate an empty path, it gets set later bucketImpl.scalarAttributeArrays.forEach([this, &bucketImpl](const AttrName& name, ScalarAttributeArray &array) { allocElement(array); CARB_UNUSED(bucketImpl); CARB_ASSERT(array.count == bucketImpl.elemToPath.size()); }); bucketImpl.arrayAttributeArrays.forEach([this, &bucketImpl](const AttrName& name, ArrayAttributeArray &array) { allocElement(array); CARB_UNUSED(bucketImpl); CARB_ASSERT(array.values.count == bucketImpl.elemToPath.size()); CARB_ASSERT(array.elemCounts.count == bucketImpl.elemToPath.size()); CARB_ASSERT(array.cpuElemCounts.count == bucketImpl.elemToPath.size()); CARB_ASSERT(array.gpuElemCounts.count == bucketImpl.elemToPath.size()); CARB_ASSERT(array.gpuPtrs.count == bucketImpl.elemToPath.size()); }); addElementToTrackers(element, bucketImpl); return element; } // CPU and GPU valid bits are per SoA array, not per-prim per-attribute. // Suppose we have a prim with attr whose GPU mirror is not valid, and we want // to add it to a bucket that has a valid GPU mirror of that attribute. What // should we set the bucket array's gpuValid to after the add? // // Option 1: set bucket's gpuValid to false. // If cpuValid were true for the bucket, then this would be inefficient but // correct. But, if cpuValid were false, then we'd have to copy all the bucket's // data from GPU to CPU to avoid invalidating the only valid copy of the data. // That would be very inefficient for a bucket with a lot of prims, or an // array of array-valued attributes. // // Option 2: set bucket's gpuValid to true. // For the bucket plus our new element to be gpuValid, we need to make the new // element gpuValid by copying it from CPU to GPU. 
//
// We've chosen Option 2 as it is the most efficient and makeSrcValidIfDestValid
// implements it.
// The explanation above was for GPU mirrors, but it applies equally to CPU.
//
// We are changing the srcArray mirrors to match destArray, so
// counterintuitively destArray is const and srcArray is not.
inline void PathToAttributesMap::makeSrcValidIfDestValid(MirroredArray& srcArray,
                                                         BucketImpl& srcBucketImpl,
                                                         const ArrayOfArrayInfo& srcAoaInfo,
                                                         const MirroredArray& destArray,
                                                         const AttrName& name)
{
    bool srcCpuValid = srcArray.cpuValid;
    bool srcGpuValid = srcArray.gpuValid;
    bool destCpuValid = destArray.cpuValid;
    bool destGpuValid = destArray.gpuValid;
    if (srcCpuValid && !srcGpuValid && destGpuValid)
    {
        // Possible states:
        // srcCpu srcGpu destCpu destGpu
        // 1      0      0       1
        // 1      0      1       1
        // With a valid CPU source, this will copy data to the GPU to make it valid
        // We don't set dirty indices here because this method gives read-only access
        getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaReadConfig());
        // srcCpu srcGpu destCpu destGpu
        // 1      1      0       1
        // 1      1      1       1
    }
    else if (!srcCpuValid && !srcGpuValid && !destCpuValid && destGpuValid)
    {
        // srcCpu srcGpu destCpu destGpu
        // 0      0      0       1
        // Without a valid CPU source, just allocate memory so it can be "valid" even if not initialized
        getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaWriteConfig());
        // srcCpu srcGpu destCpu destGpu
        // 0      1      0       1
    }
    else if (!srcCpuValid && srcGpuValid && destCpuValid)
    {
        // srcCpu srcGpu destCpu destGpu
        // 0      1      1       0
        // 0      1      1       1
        // With a valid GPU source, this will copy data back to the CPU to make it valid
        // We don't set dirty indices here because this method gives read-only access
        getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuReadConfig());
        // srcCpu srcGpu destCpu destGpu
        // 1      1      1       0
        // 1      1      1       1
    }
    else if (!srcCpuValid && !srcGpuValid && destCpuValid && !destGpuValid)
    {
        // srcCpu srcGpu destCpu destGpu
        // 0      0      1       0
        // Without a valid GPU source, just allocate memory so it can be "valid" even if not initialized
        getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuWriteConfig());
        // srcCpu srcGpu destCpu destGpu
        // 1      0      1       0
    }
    else if (!srcCpuValid && !srcGpuValid && destCpuValid && destGpuValid)
    {
        // srcCpu srcGpu destCpu destGpu
        // 0      0      1       1
        // Without a valid GPU source, just allocate memory so it can be "valid" even if not initialized
        getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CudaWriteConfig());
        // srcCpu srcGpu destCpu destGpu
        // 0      1      1       1
        // This one clears gpuValid, because we assume that the user is going to write to it
        // But, we're not passing the allocated pointer to the user so...
        getArraySpanC(srcArray, name, srcAoaInfo, srcBucketImpl, CpuWriteConfig());
        // srcCpu srcGpu destCpu destGpu
        // 1      0      1       1
        // ..we can safely set gpuValid to true
        srcArray.gpuValid = true;
        // srcCpu srcGpu destCpu destGpu
        // 1      1      1       1
    }
}

// Appends one element to destArray as part of moving a prim between buckets.
// srcArray may be null when the source bucket does not have this attribute;
// in that case only CPU storage is considered on the source side.
inline void PathToAttributesMap::allocElementForMove(BucketImpl& srcBucketImpl,
                                                     const ArrayOfArrayInfo &srcAoaInfo,
                                                     const AttrName& name,
                                                     MirroredArray &destArray,
                                                     MirroredArray *const srcArray)
{
    bool srcGpuAlloced = false;
    if (srcArray)
    {
        makeSrcValidIfDestValid(*srcArray, srcBucketImpl, srcAoaInfo, destArray, name);
        srcGpuAlloced = (srcArray->gpuCapacity != 0);
    }
    if (srcArray)
    {
        // Duplicate attribute names with mismatched types cannot be moved
        // safely; warn loudly (size mismatch is the worst case).
        if (destArray.type != srcArray->type)
        {
            if (destArray.typeinfo.size != srcArray->typeinfo.size)
            {
                CARB_LOG_ERROR_ONCE(
                    "PathToAttributesMap (%p) contains attributes with duplicate name \"%s\" with different types and different per-element sizes (%zu vs %zu). Data will almost certainly become corrupted during request to move elements between buckets!",
                    this, toTfToken(name.name).GetString().c_str(), destArray.typeinfo.size, srcArray->typeinfo.size);
            }
            else
            {
                CARB_LOG_WARN_ONCE(
                    "PathToAttributesMap (%p) contains attributes with duplicate name \"%s\" with different types but same per-element size. Data may become corrupted during request to move elements between buckets!",
                    this, toTfToken(name.name).GetString().c_str());
            }
        }
    }
    const size_t allocSize = destArray.typeinfo.size;
    const size_t newSize = destArray.size() + allocSize;
    destArray.resize(newSize);
    const bool destGpuAlloced = (destArray.gpuCapacity != 0);
    // Grow the dest GPU mirror if either side already has one
    if (srcGpuAlloced || destGpuAlloced)
    {
        destArray.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, destArray.size(), destArray.typeinfo.size);
    }
    destArray.count++;
}

// When moving elements between buckets we want to only allocate GPU storage if
// the source had a valid GPU mirror.
inline size_t PathToAttributesMap::allocElementForMove(BucketImpl& destBucketImpl, BucketImpl& srcBucketImpl, const PathC& path)
{
    const size_t element = destBucketImpl.elemToPath.size();
    destBucketImpl.elemToPath.emplace_back(); // Allocate an empty path, it gets set later
    // Only allocate dest GPU mirror if src has GPU mirror
    destBucketImpl.scalarAttributeArrays.forEach([this, &srcBucketImpl](const AttrName& name, ScalarAttributeArray &array) {
        ScalarAttributeArray *srcArray = srcBucketImpl.scalarAttributeArrays.find(name, &srcArray) ? srcArray : nullptr;
        const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo();
        allocElementForMove(srcBucketImpl, aoa, name, array, srcArray);
    });
    destBucketImpl.arrayAttributeArrays.forEach([this, &srcBucketImpl](const AttrName& name, ArrayAttributeArray &array) {
        ArrayAttributeArray *srcArray = srcBucketImpl.arrayAttributeArrays.find(name, &srcArray) ? srcArray : nullptr;
        static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
        const ArrayOfArrayInfo aoa = srcArray ? getArrayOfArrayInfo(*srcArray) : ScalarArrayOfArrayInfo();
        allocElementForMove(srcBucketImpl, aoa, name, array.values, srcArray ? &srcArray->values : nullptr);
        allocElementForMove(srcBucketImpl, aoa, name, array.elemCounts, srcArray ? &srcArray->elemCounts : nullptr);
        allocElementForMove(srcBucketImpl, aoa, name, array.cpuElemCounts, srcArray ? &srcArray->cpuElemCounts : nullptr);
        allocElementForMove(srcBucketImpl, aoa, name, array.gpuElemCounts, srcArray ? &srcArray->gpuElemCounts : nullptr);
        allocElementForMove(srcBucketImpl, aoa, name, array.gpuPtrs, srcArray ? &srcArray->gpuPtrs : nullptr);
        // For array-valued attributes, initialize CPU and GPU element counts
        static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
        reinterpret_cast<size_t*>(array.elemCounts.cpuData())[array.elemCounts.count - 1] = 0;
        array.elemCounts.cpuValid = true;
        reinterpret_cast<size_t*>(array.cpuElemCounts.cpuData())[array.cpuElemCounts.count - 1] = 0;
        array.cpuElemCounts.cpuValid = true;
        reinterpret_cast<size_t*>(array.gpuElemCounts.cpuData())[array.gpuElemCounts.count - 1] = 0;
        array.gpuElemCounts.cpuValid = true;
    });
    addElementToTrackers(element, destBucketImpl);
    return element;
}

// Array resize that does not preserve previous data
inline void PathToAttributesMap::destructiveResizeIfNecessary(uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount)
{
    // Reallocates whenever capacity differs from desiredElemCount (growing or
    // shrinking); previous contents are discarded.
    // NOTE(review): an earlier comment said "iff (capacity < desiredElemCount)",
    // but the code also reallocates when shrinking — confirm that is intended.
    if (capacity != desiredElemCount)
    {
        size_t byteCount = desiredElemCount * elemByteCount;
        if (!USE_PINNED_MEMORY || !platform.gpuCuda)
        {
            free(cpuData);
            cpuData = reinterpret_cast<uint8_t*>(malloc(byteCount));
        }
        else if (platform.gpuCuda)
        {
            // Use page-locked memory CPU for CUDA
            platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuData);
            platform.gpuCuda->hostAlloc(*platform.gpuCudaCtx, (void**)&cpuData, byteCount);
        }
        capacity = desiredElemCount;
    }
}

// Flatcache only stores POD types, with the following exceptions:
// eToken (pxr::TfToken)
// eAsset (std::array<pxr::TfToken, 2>)
//
// The following code constructs an array of objects of one of these types,
// filling memory[newCpuData + oldByteCount ..
// newCpuData + newByteCount)
// It is called when enlarging arrays of such types
template <typename T>
void constructInPlace(uint8_t* newCpuData, size_t oldByteCount, size_t newByteCount)
{
    // Placement-new default-construct each element of the newly grown region
    T* begin = reinterpret_cast<T*>(newCpuData + oldByteCount);
    T* end = reinterpret_cast<T*>(newCpuData + newByteCount);
    for (T* current = begin; current != end; current++)
    {
        new (current) T;
    }
}

// We plan to move TfToken and AssetPath construction to IToken.
// Until we do we have to declare this here and depend on USD headers.
struct AssetPath
{
    pxr::TfToken assetPath;   // raw (authored) asset path token
    pxr::TfToken resolvedPath; // resolved asset path token
};

// Debug helper: walks every bucket's array-valued attributes and reports any
// element whose CPU capacity equals its desired count but whose CPU data
// pointer is null despite a non-zero capacity.
// NOTE(review): returns true when a failure WAS found, despite the
// "validate" name — callers should treat true as "invariants violated".
inline bool PathToAttributesMap::__validateArrayInvariants() const
{
    bool encounteredFailure = false;
    // loop over the buckets
    BucketId id{ 0 };
    for (unsigned int i = 0; i < this->buckets.end(); ++i, ++id)
    {
        const auto bucketImplPtr = buckets.find(id);
        if (!bucketImplPtr)
            continue;
        const auto& bucketImpl = *bucketImplPtr;
        if (bucketImpl.elemToPath.size() == 0)
            continue;
        //loop over all the arrays
        bucketImpl.arrayAttributeArrays.forEach([&encounteredFailure](const AttrName& name, const ArrayAttributeArray& local_array) {
            const Typeinfo& typeInfo = local_array.values.typeinfo;
            const size_t elemSize = typeInfo.size;
            // only care about actual data
            if (name.suffix != NameSuffix::none)
                return;
            // look up array info
            const MirroredArray* arraySizeArray = &local_array.elemCounts;
            const MirroredArray* arrayCpuCapacityArray = &local_array.cpuElemCounts;
            // skip tags and not arrays
            if (elemSize != 0)
            {
                //number of elements
                const size_t elemCount = local_array.values.count;
                // pointers to data
                const uint8_t* const* elemToArrayCpuData = reinterpret_cast<const uint8_t* const*>(local_array.values.cpuData());
                for (size_t elem = 0; elem != elemCount; elem++)
                {
                    // get the actual pointer
                    const uint8_t* cpuData = elemToArrayCpuData[elem];
                    // look up the cpu capacity
                    const size_t& cpuCapacity = reinterpret_cast<const size_t*>(arraySizeArray->cpuData())[elem];
                    const size_t& desiredElemCount =
                        reinterpret_cast<const size_t*>(arrayCpuCapacityArray->cpuData())[elem];
                    if (cpuCapacity == desiredElemCount)
                    {
                        // we should have valid data
                        if (cpuCapacity != 0 && !cpuData)
                        {
                            std::cout << "Invalid array name = " << toTfToken(name.name).GetString() << std::endl;
                            encounteredFailure = true;
                        }
                    }
                }
            }
        });
    }
    return encounteredFailure;
}

// Array resize that preserves previous data
inline void PathToAttributesMap::resizeIfNecessary(
    uint8_t*& cpuData, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, TypeC typeC)
{
    // TODO: reduce number of reallocations by allocating capacity larger than size
    // and not always reallocating when desiredElemCount<capacity
    if (capacity < desiredElemCount)
    {
        size_t oldByteCount = capacity * elemByteCount;
        size_t newByteCount = desiredElemCount * elemByteCount;
        uint8_t* newCpuData = nullptr;
        // Use page-locked (pinned) host memory when a CUDA context is active
        if (!USE_PINNED_MEMORY || !platform.gpuCuda)
        {
            newCpuData = reinterpret_cast<uint8_t*>(malloc(newByteCount));
        }
        else if (platform.gpuCuda)
        {
            platform.gpuCuda->hostAlloc(*platform.gpuCudaCtx, reinterpret_cast<void**>(&newCpuData), newByteCount);
        }
        if (cpuData)
        {
            // Preserve existing contents, then release the old buffer with the
            // allocator that produced it
            size_t copyByteCount = std::min(oldByteCount, newByteCount);
            memcpy(newCpuData, cpuData, copyByteCount);
            if (!USE_PINNED_MEMORY || !platform.gpuCuda)
            {
                free(cpuData);
            }
            else if (platform.gpuCuda)
            {
                platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuData);
            }
        }
        // If type has a constructor, construct any new elements
        if (oldByteCount < newByteCount)
        {
            Type type(typeC);
            const uint8_t kScalar = 1;
            const uint8_t kArray = 1;
            if (type == Type(BaseDataType::eToken, kScalar, kArray))
            {
                constructInPlace<pxr::TfToken>(newCpuData, oldByteCount, newByteCount);
            }
            else if (type == Type(BaseDataType::eAsset, kScalar, kArray))
            {
                constructInPlace<flatcache::AssetPath>(newCpuData, oldByteCount, newByteCount);
            }
            else if (type == Type(BaseDataType::eConnection, kScalar, kArray))
            {
                constructInPlace<flatcache::Connection>(newCpuData, oldByteCount, newByteCount);
            }
        }
        cpuData = newCpuData;
        capacity =
            desiredElemCount;
    }
}

// Makes the CPU mirror of `array` valid for reading.
// - CPU already valid: no work.
// - GPU valid: copies GPU -> CPU (per-element for array-of-array attributes,
//   resizing destination CPU buffers first; one bulk memcpy otherwise).
// - only USD valid: currently a no-op (lazy USD read is still a TODO).
// - nothing valid: optionally logs a warning (controlled by printWarnings).
inline void PathToAttributesMap::enableCpuReadImpl(PathToAttributesMap::MirroredArray& array,
                                                   const size_t* elemToArraySize,
                                                   PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
                                                   PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
                                                   PathToAttributesMap::MirroredArray* gpuArrayDataArray,
                                                   bool printWarnings)
{
    using omni::gpucompute::MemcpyKind;
    bool& usdValid = array.usdValid;
    bool& cpuValid = array.cpuValid;
    bool& gpuValid = array.gpuValid;
    bool& usingCuda = array.gpuAllocedWithCuda;
    uint8_t* cpuArray = array.cpuData();
    uint8_t*& gpuArray = array.gpuArray;
    // If CPU copy is valid, nothing to do
    // If GPU copy is valid, copy to CPU
    // If USD copy is valid, copy to CPU
    if (cpuValid)
    {
        // Nothing to do
    }
    else if (!cpuValid && gpuValid)
    {
        size_t byteCount = array.size();
        // Select which API to use
        omni::gpucompute::GpuCompute* computeAPI = nullptr;
        omni::gpucompute::Context* computeCtx = nullptr;
        if (usingCuda)
        {
            computeAPI = platform.gpuCuda;
            computeCtx = platform.gpuCudaCtx;
        }
        else if (!usingCuda)
        {
            computeAPI = platform.gpuD3dVk;
            computeCtx = platform.gpuD3dVkCtx;
        }
        const Typeinfo &typeinfo = array.typeinfo;
        // Serialize host/device transfers for this attribute
        std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
        if (typeinfo.isArray)
        {
            size_t elemCount = array.count;
            uint8_t** elemToArrayCpuData = reinterpret_cast<uint8_t**>(cpuArray);
            uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuArrayDataArray->cpuData());
            for (size_t elem = 0; elem != elemCount; elem++)
            {
                // Make sure that the dest (CPU) buffer is large enough
                uint8_t*& cpuData = elemToArrayCpuData[elem]; // dest
                const uint8_t* const& gpuData = elemToArrayGpuData[elem]; // src
                size_t& destCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
                size_t desiredElemCount = elemToArraySize[elem];
                destructiveResizeIfNecessary(cpuData, destCapacity, desiredElemCount, typeinfo.arrayElemSize);
                // Copy from GPU to CPU
                size_t copyByteCount = desiredElemCount *
                    typeinfo.arrayElemSize;
                if(gpuData)
                    computeAPI->memcpy(*computeCtx, cpuData, gpuData, copyByteCount, MemcpyKind::deviceToHost);
            }
            // Don't copy the outer array to CPU, because GPU is not allowed to change outer array
        }
        else
        {
            log("array values: from GPU\n");
            computeAPI->memcpy(*computeCtx, cpuArray, gpuArray, byteCount, MemcpyKind::deviceToHost);
        }
        cpuValid = true;
    }
    else if (!cpuValid && usdValid)
    {
        // printf("TODO: read data lazily from USD\n");
    }
    else
    {
        if (printWarnings)
            CARB_LOG_WARN("No source has valid data array=%p usdValid=%i cpuValid=%i gpuValid=%i gpuAllocedWithCuda=%i",
                          &array, array.usdValid, array.cpuValid, array.gpuValid, array.gpuAllocedWithCuda);
    }
}

// Same as enableCpuRead but silent when no mirror holds valid data.
inline void PathToAttributesMap::enableCpuReadIfValid(PathToAttributesMap::MirroredArray& array,
                                                      const size_t* elemToArraySize,
                                                      PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
                                                      PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
                                                      PathToAttributesMap::MirroredArray* gpuArrayDataArray)
{
    enableCpuReadImpl(array, elemToArraySize, elemToArrayCpuCapacity, elemToArrayGpuCapacity, gpuArrayDataArray, false);
}

// Makes the CPU mirror valid for reading, warning if no source is valid.
inline void PathToAttributesMap::enableCpuRead(PathToAttributesMap::MirroredArray& array,
                                               const size_t* elemToArraySize,
                                               PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
                                               PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
                                               PathToAttributesMap::MirroredArray* gpuArrayDataArray)
{
    enableCpuReadImpl(array, elemToArraySize, elemToArrayCpuCapacity, elemToArrayGpuCapacity, gpuArrayDataArray, true);
}

// Prepares the CPU mirror for writing: resizes per-element CPU buffers of
// array-valued attributes, then marks CPU as the sole valid mirror.
inline void PathToAttributesMap::enableCpuWrite(PathToAttributesMap::MirroredArray& array,
                                                const size_t* elemToArraySize,
                                                PathToAttributesMap::MirroredArray* elemToArrayCpuCapacity,
                                                PathToAttributesMap::MirroredArray* elemToArrayGpuCapacity,
                                                PathToAttributesMap::MirroredArray* elemToArrayGpuData)
{
    using omni::gpucompute::MemcpyKind;
    bool& usdValid = array.usdValid;
    bool& cpuValid = array.cpuValid;
    bool& gpuValid = array.gpuValid;
    const Typeinfo &typeinfo = array.typeinfo;
    //
// Array-valued elements are lazily allocated, meaning they are only
// resized when write access is requested.
// Write access has been requested, so resize if necessary
    std::lock_guard<MirroredArray::AttributeMutex> hostDeviceLock(array.attributeMutex);
    if (typeinfo.isArray)
    {
        size_t elemCount = array.count;
        uint8_t** elemToArrayCpuData = reinterpret_cast<uint8_t**>(array.cpuData());
        CARB_ASSERT(elemToArrayCpuCapacity->cpuValid);
        for (size_t elem = 0; elem != elemCount; elem++)
        {
            uint8_t*& cpuData = elemToArrayCpuData[elem];
            size_t& cpuCapacity = reinterpret_cast<size_t*>(elemToArrayCpuCapacity->cpuData())[elem];
            size_t desiredElemCount = elemToArraySize[elem];
            resizeIfNecessary(cpuData, cpuCapacity, desiredElemCount, typeinfo.arrayElemSize, array.type);
        }
    }
    // New state: CPU is now the only valid mirror
    usdValid = false;
    cpuValid = true;
    gpuValid = false;
}

// Core accessor: enables the requested read/write access on the requested
// device, returns the data span, and (for writes) collects the per-listener
// change-tracking index sets the caller must mark dirty.
inline ArrayAndDirtyIndices PathToAttributesMap::getArraySpanC(MirroredArray& array,
                                                               const AttrName& name,
                                                               const ArrayOfArrayInfo& aoa,
                                                               BucketImpl& bucketImpl,
                                                               const IOConfig& io)
{
    const size_t elemCount = bucketImpl.elemToPath.size();
    const Typeinfo& typeinfo = array.typeinfo;
    const size_t elemSize = typeinfo.size;
    log("begin getArrayC\n");
    bool isTag = (elemSize == 0);
    if (isTag)
    {
        // If is a tag, then array.data() will be zero, so set special value
        // to distinguish from tag absent case
        return { SpanC{ (uint8_t*)-1, elemCount, 0 }, {} };
    }
    // Read enable must come before write enable
    if (io.enableRead)
    {
        (this->*io.enableRead)(array, aoa.arraySizeArray, aoa.arrayCpuCapacityArray, aoa.arrayGpuCapacityArray, aoa.arrayGpuPtrArray);
        // If requesting GPU access to array-of-array, additionally
        // enable array of GPU pointers for GPU read
        if (typeinfo.isArray && io.device == Device::eCudaGPU)
        {
            (this->*io.enableRead)(*aoa.arrayGpuPtrArray, nullptr, nullptr, nullptr, nullptr);
        }
    }
    if (io.enableWrite)
    {
        (this->*io.enableWrite)(array, aoa.arraySizeArray, aoa.arrayCpuCapacityArray, aoa.arrayGpuCapacityArray, aoa.arrayGpuPtrArray);
        // If requesting GPU access to array-of-array, additionally
        // enable array of GPU pointers for GPU _read_
        // This is necessary because the pointers may have been
        // reallocated on CPU, and the GPU needs to _read_ these new
        // pointers
        if (typeinfo.isArray && io.device == Device::eCudaGPU)
        {
            (this->*io.enableRdPtrForWrite)(*aoa.arrayGpuPtrArray, nullptr, nullptr, nullptr, nullptr);
        }
    }
    // If CPU pointer requested
    //     return CPU pointer
    // If GPU pointer requested and not array of array
    //     return GPU pointer
    // If GPU pointer requested and array of array
    //     return GPU pointer to GPU pointer array
    uint8_t* retPtr = nullptr;
    if (io.device == Device::eCPU)
    {
        retPtr = array.cpuData();
    }
    else if (io.device == Device::eCudaGPU && !typeinfo.isArray)
    {
        retPtr = array.gpuArray;
    }
    else if (io.device == Device::eCudaGPU && typeinfo.isArray && io.ptrToPtrKind == PtrToPtrKind::eGpuPtrToGpuPtr)
    {
        retPtr = aoa.arrayGpuPtrArray->gpuArray;
    }
    else if (io.device == Device::eCudaGPU && typeinfo.isArray && io.ptrToPtrKind == PtrToPtrKind::eCpuPtrToGpuPtr)
    {
        retPtr = aoa.arrayGpuPtrArray->cpuData();
    }
    else if (io.device == Device::eD3dVkGPU && !typeinfo.isArray)
    {
        retPtr = array.gpuArray;
    }
    else if (io.device == Device::eD3dVkGPU && typeinfo.isArray)
    {
        retPtr = aoa.arrayGpuPtrArray->cpuData();
    }
    // If enabling write,
    //     for each enabled listener listening to this attribute
    //         if changedIndices exists
    //             add to vector
    //         else
    //             create changedIndices and add to vector
    //     return vector
    // else
    //     return empty vector
    std::vector<ChangedIndicesImpl*> changedIndicesForEachListener;
    changedIndicesForEachListener.reserve(listenerIdToChangeTrackerConfig.size());
    if (io.enableWrite)
    {
        // optimization because the cost to create attrNameAndType is non-trivial,
        // but they are loop invariant so we should try to only do it once.
        bool costlyInvariantsInitialized = false;
        AttrNameAndType *const attrNameAndType = (AttrNameAndType*)alloca(sizeof(AttrNameAndType)); // stack-allocate here for scope, but lazily-initialize below
        listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &costlyInvariantsInitialized, &name, &attrNameAndType, &array, &elemCount, &changedIndicesForEachListener](ListenerId& listenerId, ChangeTrackerConfig& config) {
            // Create listener if it doesn't exist in bucket
            Changes* changes;
            if (bucketImpl.listenerIdToChanges.allocateEntry(listenerId, &changes))
            {
                new (changes) Changes();
            }
            if (config.changeTrackingEnabled && config.attrNamesToLog.contains(name.name))
            {
                if (!costlyInvariantsInitialized)
                {
                    new (attrNameAndType) AttrNameAndType(Type(array.type), name.name, name.suffix);
                    costlyInvariantsInitialized = true;
                }
                auto iter = changes->changedAttributes.find(*attrNameAndType);
                bool foundChangedIndices = (iter != changes->changedAttributes.end());
                if (!foundChangedIndices)
                {
                    // TODO: move this into a new ordered_map class
                    auto& keys = changes->changedAttributes.v;
                    auto& values = changes->changedIndices;
                    // Keep keys sorted; insert the value at the matching index
                    auto insertIter = lower_bound(keys.begin(), keys.end(), *attrNameAndType);
                    ptrdiff_t insertIndex = insertIter - keys.begin();
                    keys.insert(insertIter, *attrNameAndType);
                    values.insert(values.begin() + insertIndex, ChangedIndicesImpl(elemCount));
                    changedIndicesForEachListener.push_back(&values[insertIndex]);
                }
                else
                {
                    ptrdiff_t attrIndex = iter - changes->changedAttributes.begin();
                    changedIndicesForEachListener.push_back(&changes->changedIndices[attrIndex]);
                }
            }
        });
    }
    return { SpanC{ retPtr, elemCount, typeinfo.size }, changedIndicesForEachListener };
}

// Looks up the attribute by name in the bucket (scalar first, then
// array-valued) and forwards to the core accessor above.
inline ArrayAndDirtyIndices PathToAttributesMap::getArraySpanC(BucketId bucketId, TokenC attrName, const IOConfig &io, NameSuffix suffix)
{
    BucketImpl *const bucketImpl = buckets.find(bucketId);
    if (!bucketImpl)
    {
        return { SpanC{ nullptr, 0, 0 }, {} };
    }
    const AttrName name{ attrName, suffix };
    {
        ScalarAttributeArray *array;
if (bucketImpl->scalarAttributeArrays.find(name, &array)) { const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo(); return getArraySpanC(*array, name, aoa, *bucketImpl, io); } } { ArrayAttributeArray *array; if (bucketImpl->arrayAttributeArrays.find(name, &array)) { const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*array); return getArraySpanC(array->values, name, aoa, *bucketImpl, io); } } return { SpanC{ nullptr, 0, 0 }, {} }; } template <typename T> inline const T* PathToAttributesMap::getArrayRd(const Bucket& bucket, const TokenC& attrName) { APILOGGER("getArrayRd", apiLogEnabled, attrName); // TODO: check that T is the correct type return reinterpret_cast<const T*>(getArrayRdC(bucket, attrName, NameSuffix::none)); } template <typename T> inline const T* PathToAttributesMap::getArrayRd(BucketId bucketId, const TokenC& attrName) { APILOGGER("getArrayRd", apiLogEnabled, attrName); // TODO: check that T is the correct type return reinterpret_cast<const T*>(getArrayRdC(bucketId, attrName, NameSuffix::none).ptr); } template <typename T> inline T* PathToAttributesMap::getArrayWr(const Bucket& bucket, const TokenC& attrName) { APILOGGER("getArrayWr", apiLogEnabled, attrName); // TODO: check that T is the correct type return reinterpret_cast<T*>(getArrayWrC(bucket, attrName, NameSuffix::none)); } template <typename T> inline T* PathToAttributesMap::getArray(const Bucket& bucket, const TokenC& attrName) { APILOGGER("getArray", apiLogEnabled, attrName); // TODO: check that T is the correct type return reinterpret_cast<T*>(getArrayC(bucket, attrName, NameSuffix::none)); } template <typename T> inline std::vector<const T*> PathToAttributesMap::getArraysRd(const std::vector<Bucket>& buckets, const TokenC& attrName) { size_t bucketCount = buckets.size(); std::vector<const T*> retval(bucketCount); for (size_t i = 0; i != bucketCount; i++) { retval[i] = getArrayRd<T>(buckets[i], attrName); } return retval; } template <typename T> inline std::vector<T*> 
PathToAttributesMap::getArraysWr(const std::vector<Bucket>& buckets, const TokenC& attrName) { size_t bucketCount = buckets.size(); std::vector<const T*> retval(bucketCount); for (size_t i = 0; i != bucketCount; i++) { retval[i] = getArrayWr<T>(buckets[i], attrName); } return retval; } template <typename T> inline std::vector<T*> PathToAttributesMap::getArrays(const std::vector<Bucket>& buckets, const TokenC& attrName) { size_t bucketCount = buckets.size(); std::vector<T*> retval(bucketCount); for (size_t i = 0; i != bucketCount; i++) { retval[i] = getArray<T>(buckets[i], attrName); } return retval; } inline BucketId PathToAttributesMap::findBucketId(const Bucket& bucket) { auto iter = attrNameSetToBucketId.find(bucket); bool found = iter != attrNameSetToBucketId.end(); if (!found) return { kInvalidBucketId }; return iter->second; } inline ConstSpanC PathToAttributesMap::getArraySpanRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArraySpanRdC", apiLogEnabled, attrName); // Get read-only CPU access BucketId bucketId = findBucketId(bucket); if (bucketId == kInvalidBucketId) return { nullptr, 0, 0 }; const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix); // We don't set dirty indices here because this method gives read-only access return arrayAndDirtyIndices.array; } inline const void* PathToAttributesMap::getArrayRdC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix) { APILOGGER("getArrayRdC", apiLogEnabled, attrName); // Get read-only CPU access BucketId bucketId = findBucketId(bucket); if (bucketId == kInvalidBucketId) return nullptr; const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix); // We don't set dirty indices here because this method gives read-only access return arrayAndDirtyIndices.array.ptr; } inline ConstSpanC PathToAttributesMap::getArrayRdC(BucketId bucketId, const TokenC& attrName, 
                                                   NameSuffix suffix)
{
    APILOGGER("getArrayRdC", apiLogEnabled, attrName);
    // Get read-only CPU access
    const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix);
    // We don't set dirty indices here because this method gives read-only access
    return arrayAndDirtyIndices.array;
}

// Writable CPU span of the attribute's array in `bucket`; marks all elements
// dirty for change tracking.
inline SpanC PathToAttributesMap::getArraySpanWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getArraySpanWrC", apiLogEnabled, attrName);
    BucketId bucketId = findBucketId(bucket);
    if (bucketId == kInvalidBucketId)
        return { nullptr, 0, 0 };
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
    setArrayDirty(arrayAndDirtyIndices);
    return arrayAndDirtyIndices.array;
}

// Writable CPU pointer variant of the above.
inline void* PathToAttributesMap::getArrayWrC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getArrayWrC", apiLogEnabled, attrName);
    BucketId bucketId = findBucketId(bucket);
    if (bucketId == kInvalidBucketId)
        return nullptr;
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
    setArrayDirty(arrayAndDirtyIndices);
    return arrayAndDirtyIndices.array.ptr;
}

// Writable CPU span by BucketId; marks all elements dirty.
inline SpanC PathToAttributesMap::getArrayWrC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getArrayWrC", apiLogEnabled, attrName);
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuWriteConfig(), suffix);
    setArrayDirty(arrayAndDirtyIndices);
    return arrayAndDirtyIndices.array;
}

// Writable CPU span for the attribute, creating the attribute on the bucket
// (via addAttributeC, which may move the bucket) when it does not yet exist.
inline SpanC PathToAttributesMap::getOrCreateArrayWrC(
    BucketId bucketId, const TokenC& attrName, TypeC type, NameSuffix suffix)
{
    APILOGGER("getOrCreateArrayWrC", apiLogEnabled, attrName);
    auto bucketImpl = buckets.find(bucketId);
    if (!bucketImpl)
    {
        return SpanC{ nullptr, 0, 0 };
    }
    const AttrName name{ attrName, suffix };
    ArrayOfArrayInfo aoa;
    MirroredArray* array = nullptr;
    const Typeinfo& typeinfo = getTypeInfo(type);
    if
        (typeinfo.isArray)
    {
        ArrayAttributeArray *arrayAttributeArray;
        if (!bucketImpl->arrayAttributeArrays.find(name, &arrayAttributeArray))
        {
            // Attribute absent: add it (note addAttributeC returns the
            // possibly-relocated bucket impl)
            Bucket bucket = getNamesAndTypes(bucketId);
            bucketImpl = &addAttributeC(*bucketImpl, bucket, attrName, type);
            const bool found = bucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &arrayAttributeArray);
            CARB_ASSERT(found);
            CARB_UNUSED(found);
            array = &arrayAttributeArray->values;
            aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        }
        else
        {
            array = &arrayAttributeArray->values;
            aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        }
    }
    else
    {
        ScalarAttributeArray *scalarAttributeArray;
        if (!bucketImpl->scalarAttributeArrays.find(name, &scalarAttributeArray))
        {
            Bucket bucket = getNamesAndTypes(bucketId);
            bucketImpl = &addAttributeC(*bucketImpl, bucket, attrName, type);
            const bool found = bucketImpl->scalarAttributeArrays.find({ attrName, NameSuffix::none }, &scalarAttributeArray);
            CARB_ASSERT(found);
            CARB_UNUSED(found);
            array = scalarAttributeArray;
            aoa = ScalarArrayOfArrayInfo();
        }
        else
        {
            array = scalarAttributeArray;
            aoa = ScalarArrayOfArrayInfo();
        }
    }
    CARB_ASSERT(type == array->type);
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*array, name, aoa, *bucketImpl, CpuWriteConfig());
    setArrayDirty(arrayAndDirtyIndices);
    return arrayAndDirtyIndices.array;
}

// Read/write CPU span of the attribute's array in `bucket`; marks all
// elements dirty.
inline SpanC PathToAttributesMap::getArraySpanC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getArraySpanC", apiLogEnabled, attrName);
    BucketId bucketId = findBucketId(bucket);
    if (bucketId == kInvalidBucketId)
        return { nullptr, 0, 0 };
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
    setArrayDirty(arrayAndDirtyIndices);
    return arrayAndDirtyIndices.array;
}

// Read/write CPU pointer variant of the above.
inline void* PathToAttributesMap::getArrayC(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getArrayC", apiLogEnabled, attrName);
    BucketId bucketId = findBucketId(bucket);
    if (bucketId
        == kInvalidBucketId)
        return nullptr;
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
    setArrayDirty(arrayAndDirtyIndices);
    return arrayAndDirtyIndices.array.ptr;
}

// Marks every element of the array dirty for each registered listener.
inline void PathToAttributesMap::setArrayDirty(ArrayAndDirtyIndices& array)
{
    for (ChangedIndicesImpl* listener : array.changedIndicesForEachListener)
    {
        listener->dirtyAll();
    }
}

// Marks one element of the array dirty for each registered listener.
inline void PathToAttributesMap::setArrayElementDirty(ArrayAndDirtyIndices& array, size_t elemIndex)
{
    for (ChangedIndicesImpl* listener : array.changedIndicesForEachListener)
    {
        listener->insert(elemIndex, array.array.elementCount);
    }
}

// Read/write CPU span by BucketId; marks all elements dirty.
inline SpanC PathToAttributesMap::getArrayC(BucketId bucketId, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getArrayC", apiLogEnabled, attrName);
    ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
    setArrayDirty(arrayAndDirtyIndices);
    return arrayAndDirtyIndices.array;
}

// Returns the bucket's element-index -> path array, or nullptr (with
// diagnostic logging) when the bucket cannot be found.
inline const PathC* PathToAttributesMap::getPathArray(const Bucket& bucket) const
{
    auto iter = attrNameSetToBucketId.find(bucket);
    bool found = (iter != attrNameSetToBucketId.end());
    if (found)
    {
        BucketId bucketId = iter->second;
        const auto implPtr = buckets.find(bucketId);
        if (implPtr)
        {
            return reinterpret_cast<const PathC*>(implPtr->elemToPath.data());
        }
        else
        {
            CARB_LOG_WARN_ONCE("Found in attrNameSetToBucketId, but didn't find BucketId %zu in buckets\n", size_t(bucketId));
        }
    }
    CARB_LOG_ERROR("getPathArray: Bucket not found");
    printBucket(bucket);
    std::cout << "\n";
    CARB_LOG_INFO("Bucket list:");
    printBucketNamesAndTypes();
    return nullptr;
}

// Span variant keyed by BucketId; returns an empty span when not found.
inline ConstPathCSpan PathToAttributesMap::getPathArray(BucketId bucketId) const
{
    const auto implPtr = buckets.find(bucketId);
    if (implPtr)
    {
        const BucketImpl& bucketImpl = *implPtr;
        return { reinterpret_cast<const Path*>(bucketImpl.elemToPath.data()), bucketImpl.elemToPath.size() };
    }
    else
    {
        CARB_LOG_WARN_ONCE("Found in attrNameSetToBucketId, but didn't find BucketId %zu in buckets\n", size_t(bucketId));
    }
    return { nullptr, 0 };
}

// Rebuilds the bucket's attribute-name-and-type set from its scalar and
// array attribute maps (suffixes other than none/connection are skipped).
inline Bucket PathToAttributesMap::getNamesAndTypes(BucketId bucketId) const
{
    auto implPtr = buckets.find(bucketId);
    if (implPtr)
    {
        const BucketImpl& bucketImpl = *implPtr;
        size_t maxCount = bucketImpl.scalarAttributeArrays.size();
        set<flatcache::AttrNameAndType> bucket;
        bucket.reserve(maxCount);
        bucketImpl.scalarAttributeArrays.forEach([&bucket](const AttrName& name, const ScalarAttributeArray& array) {
            const TypeC& type = array.type;
            if (name.suffix == NameSuffix::none || name.suffix == NameSuffix::connection)
            {
                AttrNameAndType attrNameAndType;
                attrNameAndType.type = carb::flatcache::Type(type);
                attrNameAndType.name = name.name;
                attrNameAndType.suffix = name.suffix;
                bucket.insert(attrNameAndType);
            }
        });
        bucketImpl.arrayAttributeArrays.forEach([&bucket](const AttrName& name, const ArrayAttributeArray& array) {
            const TypeC& type = array.values.type;
            if (name.suffix == NameSuffix::none || name.suffix == NameSuffix::connection)
            {
                AttrNameAndType attrNameAndType;
                attrNameAndType.type = Type(type);
                attrNameAndType.name = name.name;
                attrNameAndType.suffix = name.suffix;
                bucket.insert(attrNameAndType);
            }
        });
        return bucket;
    }
    else
    {
        CARB_LOG_ERROR_ONCE("getNamesAndTypes, bucketId %zu not found\n", size_t(bucketId));
        return set<AttrNameAndType>();
    }
}

// Debug check: every bucket's stored name/type set must match what
// getNamesAndTypes reconstructs from the attribute maps; breaks into the
// debugger on mismatch.
inline void PathToAttributesMap::checkInvariants()
{
    for (auto& bucketAndId : attrNameSetToBucketId)
    {
        const Bucket& correctBucket = bucketAndId.first;
        BucketId bucketId = bucketAndId.second;
        Bucket candidateBucket = getNamesAndTypes(bucketId);
        if (candidateBucket.size() != correctBucket.size())
        {
            CARB_BREAK_POINT();
        }
        for (size_t i = 0; i != candidateBucket.size(); i++)
        {
            const AttrNameAndType& candidateNameAndType = candidateBucket.v[i];
            const AttrNameAndType& correctNameAndType = correctBucket.v[i];
            if (!(candidateNameAndType == correctNameAndType))
            {
                std::stringstream ss;
                ss << "Candidate: " << Type(candidateNameAndType.type) << " " <<
Token(candidateNameAndType.name).getText() << toString(candidateNameAndType.suffix) << " "
                   << " Correct: " << Type(correctNameAndType.type) << " "
                   << Token(correctNameAndType.name).getText() << toString(correctNameAndType.suffix) << " "
                   << "\n";
                CARB_LOG_ERROR("%s", ss.str().c_str());
                CARB_BREAK_POINT();
            }
        }
    }
}

// Find the entry for attrName (with suffix 'none') in a bucket's sorted
// name/type set. Returns {found, iterator}; the iterator is only valid when
// found is true.
inline std::pair<bool, std::vector<AttrNameAndType>::const_iterator> findAttrNameAndType(const Bucket& bucket,
                                                                                         const TokenC& attrName)
{
#if 0
    // NOTE(review): this disabled branch is stale - it references
    // "AttrNameAndTime" (typo) and returns a TfType rather than the
    // {bool, iterator} pair, so it would not compile if re-enabled.
    // Do O(log n) search of bucket for attrName, ignoring type
    auto cmp = [](const AttrNameAndType& a, AttrNameAndTime b) { return a.name < b; };
    auto i = lower_bound(bucket.begin(), bucket.end(), attrName, cmp);
    // There can be multiple elements with same attrName, so check them all
    while (i != bucket.end() && i->name == attrName && i->suffix != NameSuffix::none)
        i++;
    // At this point i is either at the end, or at the end of the elements with attrName, or pointing to an element with
    // suffix==none
    // If didn't get to the end, and didn't get to the end of the elements with attrName, then must be pointing to
    // attrName with suffix==none
    bool found = (i != bucket.end() && i->name == attrName);
    return found ? (i->tfType) : (pxr::TfType());
#else
    // Until we fix the order of the fields in the tuple to make equal attrNames contiguous, do a linear search
    auto i = bucket.begin();
    while (i != bucket.end() && !(i->name == attrName && i->suffix == NameSuffix::none))
        i++;
    bool found = (i != bucket.end());
    return make_pair(found, i);
#endif
}

// Type lookup by attribute name within a bucket's name/type set.
// Returns the default TypeC() when the attribute is not present.
inline TypeC PathToAttributesMap::getType(const Bucket& bucket, const TokenC& attrName) const
{
    APILOGGER("getType", apiLogEnabled, attrName);
    std::vector<AttrNameAndType>::const_iterator pAttrNameAndType;
    bool found;
    std::tie(found, pAttrNameAndType) = findAttrNameAndType(bucket, attrName);
    return found ? TypeC(pAttrNameAndType->type) : TypeC();
}

// Add a path to the map, placing it in destBucket.
// - New path: allocate an element in (possibly newly created) destBucket.
// - Existing path: leave it where it is if destBucket is empty/unchanged,
//   otherwise move its data to destBucket.
inline void PathToAttributesMap::addPath(const PathC& path, const Bucket& destBucket)
{
    std::pair<BucketId, ArrayIndex> *pathAndBucketElem;
    if (pathToBucketElem.allocateEntry(path, &pathAndBucketElem))
    {
        // Path was not present: create/find the bucket and append an element
        auto bucketIdAndImpl = findOrCreateBucket(destBucket);
        BucketId bucketId = bucketIdAndImpl.first;
        BucketImpl& bucketImpl = bucketIdAndImpl.second;
        bucketImpl.SetBucket(destBucket);
        size_t endElement = allocElement(bucketImpl);
        *pathAndBucketElem = std::make_pair(bucketId, endElement);
        bucketImpl.elemToPath[endElement] = { toSdfPath(path) };
    }
    else
    {
        auto iter = attrNameSetToBucketId.find(destBucket);
        bool destBucketExists = (iter != attrNameSetToBucketId.end());
        BucketId destBucketId = destBucketExists ? iter->second : kInvalidBucketId;
        BucketId currentBucketId = pathAndBucketElem->first;
        bool destBucketSpecified = (destBucket.size() != 0);
        if (!destBucketSpecified || (destBucketExists && destBucketId == currentBucketId))
        {
            // If the dest bucket is not specified, or if already in the right
            // bucket, then leave path in current bucket
            return;
        }
        else if (destBucketSpecified && (destBucketId != currentBucketId))
        {
            moveElementBetweenBuckets(path, destBucketId, currentBucketId, destBucket);
        }
    }
}

// renames a path in a bucket: rewrites the element's stored path and re-keys
// the path->bucket/element map. Data stays in place.
inline void PathToAttributesMap::renamePath(const PathC& oldPath, const PathC& newPath)
{
    // TODO: should this early exit if oldPath == newPath?
    std::pair<BucketId, ArrayIndex> *oldPathAndBucketElem;
    if (pathToBucketElem.find(oldPath, &oldPathAndBucketElem))
    {
        BucketImpl* bucketImplPtr = buckets.find(oldPathAndBucketElem->first);
        bucketImplPtr->elemToPath[oldPathAndBucketElem->second] = toSdfPath(newPath);
        std::pair<BucketId, ArrayIndex> *newPathAndBucketElem;
        pathToBucketElem.allocateEntry(newPath, &newPathAndBucketElem);
        *newPathAndBucketElem = std::move(*oldPathAndBucketElem);
        pathToBucketElem.freeEntry(oldPath);
    }
    else
    {
        CARB_LOG_WARN_ONCE("PathToAttributesMap::renamePath(%s,%s) - cannot find bucket to rename\n",
                           Path(oldPath).getText(), Path(newPath).getText());
        return;
    }
}

// present - Whether this path has a bucket
// bucket - Pointer to bucket if it does
// element - Index corresponding to path in this bucket's arrays
inline std::tuple<bool, BucketId, size_t> PathToAttributesMap::getPresentAndBucketAndElement(const PathC& path) const
{
    const std::pair<BucketId, ArrayIndex>* bucketElem;
    if (!pathToBucketElem.find(path, &bucketElem))
    {
        return { false, kInvalidBucketId, 0 };
    }
    return { true, bucketElem->first, bucketElem->second };
}

// BucketId for a path, or kInvalidBucketId when the path is not in the map.
inline BucketId PathToAttributesMap::getBucketId(const PathC& path) const
{
    std::tuple<bool, flatcache::BucketId, size_t> presentAndBucketAndElement = getPresentAndBucketAndElement(path);
    bool present = std::get<0>(presentAndBucketAndElement);
    if (!present)
        return flatcache::kInvalidBucketId;
    return std::get<1>(presentAndBucketAndElement);
}

// Single-element sub-span of an attribute array (mutable variant).
inline SpanC PathToAttributesMap::getArrayElementPtr(SpanC array, size_t bucketElement) const
{
    if (array.ptr == nullptr)
        return { nullptr, 0, 0 };
    size_t elemSize = array.elementSize;
    return { array.ptr + bucketElement * elemSize, 1, elemSize };
}

// Single-element sub-span of an attribute array (read-only variant).
inline ConstSpanC PathToAttributesMap::getArrayElementPtr(ConstSpanC array, size_t bucketElement) const
{
    if (array.ptr == nullptr)
        return { nullptr, 0, 0 };
    size_t elemSize = array.elementSize;
    return { array.ptr + bucketElement * elemSize, 1, elemSize };
}

inline SpanC
PathToAttributesMap::getAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
    // Read/write access to one attribute of one prim. Marks the element dirty.
    APILOGGER("getAttributeC", apiLogEnabled, attrName);
    bool present;      // Whether this path has a bucket
    BucketId bucketId; // Pointer to the bucket if it does
    size_t element;    // Index corresponding to path in this bucket's arrays
    std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
    if (!present)
        return { nullptr, 0, 0 };
    ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), suffix);
    setArrayElementDirty(arrayAndchangedIndices, element);
    SpanC array = arrayAndchangedIndices.array;
    return getArrayElementPtr(array, element);
}

// Read-only access to one attribute of one prim; no dirty marking.
inline ConstSpanC PathToAttributesMap::getAttributeRdC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getAttributeRdC", apiLogEnabled, attrName);
    bool present;
    BucketId bucketId;
    size_t element;
    std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
    if (!present)
        return { nullptr, 0, 0 };
    const ConstSpanC array = getArraySpanC(bucketId, attrName, CpuReadConfig(), suffix).array;
    // We don't set dirty indices here because this method gives read-only access
    return getArrayElementPtr(array, element);
}

// Write access to one attribute of one prim. Marks the element dirty.
inline SpanC PathToAttributesMap::getAttributeWrC(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getAttributeWrC", apiLogEnabled, path, attrName);
    bool present;
    BucketId bucketId;
    size_t element;
    std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
    if (!present)
        return { nullptr, 0, 0 };
    // Writing an element is a RMW on the whole array, so get read/write CPU access
    ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadIfValidWriteConfig(), suffix);
    setArrayElementDirty(arrayAndchangedIndices, element);
    SpanC array = arrayAndchangedIndices.array;
    return getArrayElementPtr(array, element);
}

// Resize the array-valued attribute of one prim (by path) and return a span
// over its (new-size) data. Convenience wrapper over the BucketId overload.
inline SpanC PathToAttributesMap::setArrayAttributeSizeAndGet(PathC path, const TokenC& attrName, size_t newSize)
{
    bool present;        // Whether this path has a bucket
    BucketId bucketId;   // Pointer to the bucket if it does
    size_t elementIndex; // Index corresponding to path in this bucket's arrays
    std::tie(present, bucketId, elementIndex) = getPresentAndBucketAndElement(path);
    if (!present)
        return { nullptr, 0, 0 };
    return setArrayAttributeSizeAndGet(bucketId, elementIndex, attrName, newSize);
}

// Resize one element of an array-valued attribute: writes the new element
// count, marks it dirty, then re-fetches the values array (which reallocates
// per-element storage as needed) and returns {data, newSize}.
inline SpanC PathToAttributesMap::setArrayAttributeSizeAndGet(
    BucketId bucketId, size_t elementIndex, const TokenC& attrName, size_t newSize)
{
    APILOGGER("setArrayAttributeSizeAndGet", apiLogEnabled, attrName);
    // TODO: remove double hash lookup here
    ArrayAndDirtyIndices sizeArray;
    {
        BucketImpl* bucketImpl;
        ArrayAttributeArray *arrayAttributeArray;
        if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
        {
            const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
            sizeArray = getArraySpanC(arrayAttributeArray->elemCounts, AttrName{ attrName, NameSuffix::none }, aoa,
                                      *bucketImpl, CpuReadWriteConfig());
        }
        else
        {
            sizeArray = { 0 };
        }
    }
    if (sizeArray.array.elementCount <= elementIndex)
        return { nullptr, 0, 0 };
    // Set the size
    size_t* sizePtr = reinterpret_cast<size_t*>(getArrayElementPtr(sizeArray.array, elementIndex).ptr);
    if (!sizePtr)
        return { nullptr, 0, 0 };
    *sizePtr = newSize;
    // TODO: does this need to be moved higher next to getArraySpanC above?
setArrayElementDirty(sizeArray, elementIndex);
    // Get the new array-valued element
    ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), NameSuffix::none);
    setArrayElementDirty(arrayAndchangedIndices, elementIndex);
    SpanC array = arrayAndchangedIndices.array;
    uint8_t** arrayData = reinterpret_cast<uint8_t**>(getArrayElementPtr(array, elementIndex).ptr);
    if (!arrayData)
        return { nullptr, 0, 0 };
    return { *arrayData, newSize, 0 };
}

// Typed read/write accessor for one attribute of one prim.
template <typename T>
T* PathToAttributesMap::getAttribute(const PathC& path, const TokenC& attrName)
{
    APILOGGER("getAttribute", apiLogEnabled, path, attrName);
    // TODO: check that T is the correct type
    return reinterpret_cast<T*>(getAttributeC(path, attrName, NameSuffix::none).ptr);
}

// Typed read-only accessor; does not mark change-tracking dirty state.
template <typename T>
const T* PathToAttributesMap::getAttributeRd(const PathC& path, const TokenC& attrName)
{
    APILOGGER("getAttributeRd", apiLogEnabled, path, attrName);
    // TODO: check that T is the correct type
    return reinterpret_cast<const T*>(getAttributeRdC(path, attrName, NameSuffix::none).ptr);
}

// Typed write accessor (suffix 'none').
template <typename T>
T* PathToAttributesMap::getAttributeWr(const PathC& path, const TokenC& attrName)
{
    APILOGGER("getAttributeWr", apiLogEnabled, path, attrName);
    // TODO: check that T is the correct type
    return reinterpret_cast<T*>(getAttributeWrC(path, attrName, NameSuffix::none).ptr);
}

// Typed write accessor with explicit name suffix.
template <typename T>
T* PathToAttributesMap::getAttributeWr(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("getAttributeWr", apiLogEnabled, path, attrName);
    // TODO: check that T is the correct type
    return reinterpret_cast<T*>(getAttributeWrC(path, attrName, suffix).ptr);
}

// Report which mirrors (CPU / CUDA GPU / graphics GPU) currently hold valid
// data for an attribute at a path. For array-valued attributes, subArray
// selects which of the mirrored sub-arrays to inspect.
inline ValidMirrors PathToAttributesMap::getAttributeValidBits(const PathC& path,
                                                               const TokenC& attrName,
                                                               ArrayAttributeArray::MirroredArrays subArray) const
{
    APILOGGER("getAttributeValidBits", apiLogEnabled, path, attrName);
    bool present;      // Whether this path has a bucket
    BucketId bucketId; // Pointer to the bucket if it does
    size_t element;    // Index corresponding to path in this bucket's arrays
    std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
    if (!present)
        return ValidMirrors::eNone;
    const BucketImpl* bucketImplPtr = buckets.find(bucketId);
    if (!bucketImplPtr)
        return ValidMirrors::eNone;
    const MirroredArray *array = nullptr;
    const ScalarAttributeArray *scalarAttributeArray;
    const AttrName name{ attrName, NameSuffix::none };
    if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
    {
        array = scalarAttributeArray;
    }
    else
    {
        const ArrayAttributeArray *arrayAttributeArray;
        if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
        {
            // This if statement is only needed because we ported OM-70434 fix from 105 to 104.2
            // In 105, these mirrored arrays are stored in an array, indexed by subarray
            if (subArray == ArrayAttributeArray::MirroredArrays::Values)
            {
                array = &arrayAttributeArray->values;
            }
            else if (subArray == ArrayAttributeArray::MirroredArrays::ElemCounts)
            {
                array = &arrayAttributeArray->elemCounts;
            }
            else if (subArray == ArrayAttributeArray::MirroredArrays::CpuElemCounts)
            {
                array = &arrayAttributeArray->cpuElemCounts;
            }
            else if (subArray == ArrayAttributeArray::MirroredArrays::GpuElemCounts)
            {
                array = &arrayAttributeArray->gpuElemCounts;
            }
            else if (subArray == ArrayAttributeArray::MirroredArrays::GpuPtrs)
            {
                array = &arrayAttributeArray->gpuPtrs;
            }
        }
        else
        {
            return ValidMirrors::eNone;
        }
    }
    // Tags (zero-size types) carry no data, so no mirror can be valid.
    const size_t elemSize = array->typeinfo.size;
    const bool isTag = (elemSize == 0);
    if (isTag)
        return ValidMirrors::eNone;
    ValidMirrors retval = ValidMirrors::eNone;
    if (array->cpuValid)
        retval = retval | ValidMirrors::eCPU;
    if (array->gpuValid && array->gpuAllocedWithCuda)
        retval = retval | ValidMirrors::eCudaGPU;
    if (array->gpuValid && !array->gpuAllocedWithCuda)
        retval = retval | ValidMirrors::eGfxGPU;
    return retval;
}

inline bool PathToAttributesMap::findArrayAttributeArrayForPath(const PathC& path, const TokenC& attrName, size_t&
outElementIndex, BucketImpl*& outBucketImpl, ArrayAttributeArray*& outArrayAttributeArray)
{
    // Resolve path -> (bucket, element) and look up the named array-valued
    // attribute in that bucket. Outputs are only valid when true is returned.
    BucketId bucketId;
    bool found;
    std::tie(found, bucketId, outElementIndex) = getPresentAndBucketAndElement(path);
    if (found)
    {
        outBucketImpl = buckets.find(bucketId);
        if (outBucketImpl)
        {
            ArrayAttributeArray* array;
            if (outBucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &array))
            {
                outArrayAttributeArray = array;
                return true;
            }
        }
    }
    CARB_LOG_WARN_ONCE("Warning: %s not found\n", toTfToken(attrName).GetText());
    return false;
}

// Same as findArrayAttributeArrayForPath but starting from a BucketId.
inline bool PathToAttributesMap::findArrayAttributeArrayForBucketId(const BucketId bucketId,
                                                                    const TokenC& attrName,
                                                                    BucketImpl*& outBucketImpl,
                                                                    ArrayAttributeArray*& outArrayAttributeArray)
{
    if (bucketId != kInvalidBucketId)
    {
        outBucketImpl = buckets.find(bucketId);
        if (outBucketImpl)
        {
            ArrayAttributeArray* array;
            if (outBucketImpl->arrayAttributeArrays.find({ attrName, NameSuffix::none }, &array))
            {
                outArrayAttributeArray = array;
                return true;
            }
        }
    }
    CARB_LOG_WARN_ONCE("Warning: %s not found\n", toTfToken(attrName).GetText());
    return false;
}

// Read/write pointer to the element-count of one prim's array-valued
// attribute. Marks the count element dirty.
inline size_t* PathToAttributesMap::getArrayAttributeSize(const PathC& path, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSize", apiLogEnabled, path, attrName);
    size_t element;
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                    AttrName{ attrName, NameSuffix::none }, aoa,
                                                                    *bucketImpl, CpuReadWriteConfig());
        setArrayElementDirty(arrayAndchangedIndices, element);
        const SpanC array = arrayAndchangedIndices.array;
        return reinterpret_cast< size_t*>(getArrayElementPtr(array, element).ptr);
    }
    return nullptr;
}

// Read-only pointer to the element-count of one prim's array-valued
// attribute; no dirty marking.
inline const size_t* PathToAttributesMap::getArrayAttributeSizeRd(const PathC& path, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizeRd", apiLogEnabled, path, attrName);
    size_t element;
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                          AttrName{ attrName, NameSuffix::none }, aoa,
                                                                          *bucketImpl, CpuReadConfig());
        const ConstSpanC array = arrayAndchangedIndices.array;
        return reinterpret_cast<const size_t*>(getArrayElementPtr(array, element).ptr);
    }
    return nullptr;
}

// Write pointer to the element-count of one prim's array-valued attribute
// (read-if-valid + write access). Marks the count element dirty.
inline size_t* PathToAttributesMap::getArrayAttributeSizeWr(const PathC& path, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizeWr", apiLogEnabled, path, attrName);
    size_t element;
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                    AttrName{ attrName, NameSuffix::none }, aoa,
                                                                    *bucketImpl, CpuReadIfValidWriteConfig());
        setArrayElementDirty(arrayAndchangedIndices, element);
        const SpanC array = arrayAndchangedIndices.array;
        return reinterpret_cast<size_t*>(getArrayElementPtr(array, element).ptr);
    }
    return nullptr;
}

// GPU-side (CUDA) read-only pointer to the element-count of one prim's
// array-valued attribute.
inline const size_t* PathToAttributesMap::getArrayAttributeSizeRdGpu(const PathC& path, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizeRdGpu", apiLogEnabled, path, attrName);
    size_t element;
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForPath(path, attrName, element, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts,
AttrName{ attrName, NameSuffix::none }, aoa, *bucketImpl, CudaReadConfig());
        const ConstSpanC array = arrayAndchangedIndices.array;
        return reinterpret_cast<const size_t*>(getArrayElementPtr(array, element).ptr);
    }
    return nullptr;
}

// GPU-side (CUDA) read-only pointer to the whole element-count array of a
// bucket's array-valued attribute, bucket identified by name/type set.
inline const size_t* PathToAttributesMap::getArrayAttributeSizesRdGpu(const Bucket& bucket, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizesRdGpu", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    const BucketId bucketId = findBucketId(bucket);
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts,
                                                                          AttrName{ attrName, NameSuffix::none }, aoa,
                                                                          *bucketImpl, CudaReadConfig());
        const ConstSpanC array = arrayAndchangedIndices.array;
        return reinterpret_cast<const size_t*>(array.ptr);
    }
    return nullptr;
}

// Same as above but by BucketId; returns a typed span instead of a bare pointer.
inline ConstSpanSizeC PathToAttributesMap::getArrayAttributeSizesRdGpu(BucketId bucketId, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizesRdGpu", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        const ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->gpuElemCounts,
                                                                          AttrName{ attrName, NameSuffix::none }, aoa,
                                                                          *bucketImpl, CudaReadConfig());
        const ConstSpanC array = arrayAndchangedIndices.array;
        return { reinterpret_cast<const size_t*>(array.ptr), array.elementCount };
    }
    return { nullptr, 0 };
}

// CPU read/write pointer to the whole element-count array of a bucket's
// array-valued attribute. Marks the whole count array dirty.
inline size_t* PathToAttributesMap::getArrayAttributeSizes(const Bucket& bucket, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizes", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    const BucketId bucketId = findBucketId(bucket);
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                    AttrName{ attrName, NameSuffix::none }, aoa,
                                                                    *bucketImpl, CpuReadWriteConfig());
        setArrayDirty(arrayAndchangedIndices);
        const SpanC array = arrayAndchangedIndices.array;
        return reinterpret_cast<size_t*>(array.ptr);
    }
    return nullptr;
}

// Same as above but by BucketId; returns a typed span.
inline SpanSizeC PathToAttributesMap::getArrayAttributeSizes(BucketId bucketId, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizes", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                    AttrName{ attrName, NameSuffix::none }, aoa,
                                                                    *bucketImpl, CpuReadWriteConfig());
        setArrayDirty(arrayAndchangedIndices);
        const SpanC array = arrayAndchangedIndices.array;
        CARB_ASSERT(array.elementSize == sizeof(size_t));
        return { reinterpret_cast<size_t*>(array.ptr), array.elementCount };
    }
    return { 0, 0 };
}

// CPU read-only span over a bucket's element-count array; no dirty marking.
inline ConstSpanSizeC PathToAttributesMap::getArrayAttributeSizesRd(BucketId bucketId, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizesRd", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                    AttrName{ attrName, NameSuffix::none }, aoa,
                                                                    *bucketImpl, CpuReadConfig());
        const ConstSpanC array = arrayAndchangedIndices.array;
CARB_ASSERT(array.elementSize == sizeof(size_t));
        return { reinterpret_cast<const size_t*>(array.ptr), array.elementCount };
    }
    return { 0, 0 };
}

// Write-only span over a bucket's element-count array. Marks the whole count
// array dirty.
inline SpanSizeC PathToAttributesMap::getArrayAttributeSizesWr(BucketId bucketId, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizesWr", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                    AttrName{ attrName, NameSuffix::none }, aoa,
                                                                    *bucketImpl, CpuWriteConfig());
        setArrayDirty(arrayAndchangedIndices);
        const SpanC array = arrayAndchangedIndices.array;
        CARB_ASSERT(array.elementSize == sizeof(size_t));
        return { reinterpret_cast<size_t*>(array.ptr), array.elementCount };
    }
    return { 0, 0 };
}

// Read/write access to an array-valued attribute across a whole bucket:
// returns the per-element data pointers (marked dirty) together with the
// read-only per-element sizes.
inline ArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizes(BucketId bucketId,
                                                                                   const TokenC& attrName)
{
    APILOGGER("getArrayAttributeArrayWithSizes", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        SpanC spanOfPointers;
        {
            ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values,
                                                                      AttrName{ attrName, NameSuffix::none }, aoa,
                                                                      *bucketImpl, CpuReadWriteConfig());
            setArrayDirty(arrayAndDirtyIndices);
            spanOfPointers = arrayAndDirtyIndices.array;
        }
        ConstSpanC spanOfSizes;
        {
            const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                            AttrName{ attrName, NameSuffix::none }, aoa,
                                                                            *bucketImpl, CpuReadConfig());
            spanOfSizes = arrayAndDirtyIndices.array;
        }
        CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
        CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
        CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
        return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr),
                 reinterpret_cast<const size_t*>(spanOfSizes.ptr), spanOfPointers.elementCount };
    }
    return ArrayPointersAndSizesC{ 0, 0, 0 };
}

// Read-only variant of getArrayAttributeArrayWithSizes; no dirty marking.
inline ConstArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizesRd(BucketId bucketId,
                                                                                          const TokenC& attrName)
{
    APILOGGER("getArrayAttributeArrayWithSizesRd", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ConstSpanC spanOfPointers;
        {
            const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values,
                                                                            AttrName{ attrName, NameSuffix::none }, aoa,
                                                                            *bucketImpl, CpuReadConfig());
            spanOfPointers = arrayAndDirtyIndices.array;
        }
        ConstSpanC spanOfSizes;
        {
            const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                            AttrName{ attrName, NameSuffix::none }, aoa,
                                                                            *bucketImpl, CpuReadConfig());
            spanOfSizes = arrayAndDirtyIndices.array;
        }
        CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
        CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
        CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
        return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr),
                 reinterpret_cast<const size_t*>(spanOfSizes.ptr), spanOfPointers.elementCount };
    }
    return ConstArrayPointersAndSizesC{ 0, 0, 0 };
}

// Write variant; note it currently requests CpuReadWriteConfig for the values
// (a write is a RMW on the pointer array) and read-only sizes.
inline ArrayPointersAndSizesC PathToAttributesMap::getArrayAttributeArrayWithSizesWr(BucketId bucketId,
                                                                                     const TokenC& attrName)
{
    APILOGGER("getArrayAttributeArrayWithSizesWr", apiLogEnabled, attrName);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa =
getArrayOfArrayInfo(*arrayAttributeArray);
        SpanC spanOfPointers;
        {
            ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->values,
                                                                      AttrName{ attrName, NameSuffix::none }, aoa,
                                                                      *bucketImpl, CpuReadWriteConfig());
            setArrayDirty(arrayAndDirtyIndices);
            spanOfPointers = arrayAndDirtyIndices.array;
        }
        ConstSpanC spanOfSizes;
        {
            const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                            AttrName{ attrName, NameSuffix::none }, aoa,
                                                                            *bucketImpl, CpuReadConfig());
            spanOfSizes = arrayAndDirtyIndices.array;
        }
        CARB_ASSERT(spanOfPointers.elementSize == sizeof(uint8_t*));
        CARB_ASSERT(spanOfSizes.elementSize == sizeof(size_t));
        CARB_ASSERT(spanOfPointers.elementCount == spanOfSizes.elementCount);
        return { reinterpret_cast<uint8_t* const*>(spanOfPointers.ptr),
                 reinterpret_cast<const size_t*>(spanOfSizes.ptr), spanOfPointers.elementCount };
    }
    return ArrayPointersAndSizesC{ 0, 0, 0 };
}

// Read-only pointer to a bucket's element-count array, bucket identified by
// its attribute name/type set; no dirty marking.
inline const size_t* PathToAttributesMap::getArrayAttributeSizesRd(const Bucket& bucket, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizesRd", apiLogEnabled, attrName);
    const BucketId bucketId = findBucketId(bucket);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        const ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                        AttrName{ attrName, NameSuffix::none }, aoa,
                                                                        *bucketImpl, CpuReadConfig());
        return reinterpret_cast<const size_t*>(arrayAndDirtyIndices.array.ptr);
    }
    return nullptr;
}

// Write-only pointer to a bucket's element-count array; marks the whole count
// array dirty.
inline size_t* PathToAttributesMap::getArrayAttributeSizesWr(const Bucket& bucket, const TokenC& attrName)
{
    APILOGGER("getArrayAttributeSizesWr", apiLogEnabled, attrName);
    const BucketId bucketId = findBucketId(bucket);
    BucketImpl* bucketImpl;
    ArrayAttributeArray *arrayAttributeArray;
    if (findArrayAttributeArrayForBucketId(bucketId, attrName, bucketImpl, arrayAttributeArray))
    {
        const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray);
        ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts,
                                                                  AttrName{ attrName, NameSuffix::none }, aoa,
                                                                  *bucketImpl, CpuWriteConfig());
        setArrayDirty(arrayAndDirtyIndices);
        return reinterpret_cast<size_t*>(arrayAndDirtyIndices.array.ptr);
    }
    return nullptr;
}

// Intersect set<AttrNameAndType> and set<AttrNameAndType> comparing type,
// name and suffix, and ignoring tfType
inline void set_intersection2(set<AttrNameAndType>::const_iterator first1,
                              set<AttrNameAndType>::const_iterator last1,
                              set<AttrNameAndType>::const_iterator first2,
                              set<AttrNameAndType>::const_iterator last2,
                              std::back_insert_iterator<std::vector<AttrNameAndType>> d_first)
{
    // Note that in the name comparisons below TokenC masks off USD's lifetime bit.
    // For example, tokens created from the same string are considered equal even
    // if one was created with finite lifetime and the other infinite lifetime.
auto comp1 = [](const AttrNameAndType& a, const AttrNameAndType& b) {
        // Strict weak ordering on (type, name, suffix), ignoring tfType
        if (TypeC(a.type) < TypeC(b.type))
            return true;
        if (TypeC(b.type) < TypeC(a.type))
            return false;
        if (TokenC(a.name) < TokenC(b.name))
            return true;
        if (TokenC(b.name) < TokenC(a.name))
            return false;
        return a.suffix < b.suffix;
    };
    auto comp2 = [](const AttrNameAndType& a, const AttrNameAndType& b) {
        if (TypeC(a.type) < TypeC(b.type))
            return true;
        if (TypeC(b.type) < TypeC(a.type))
            return false;
        if (TokenC(a.name) < TokenC(b.name))
            return true;
        if (TokenC(b.name) < TokenC(a.name))
            return false;
        return a.suffix < b.suffix;
    };
    // Standard sorted-range intersection (same shape as std::set_intersection)
    while (first1 != last1 && first2 != last2)
    {
        if (comp1(*first1, *first2))
        {
            ++first1;
        }
        else
        {
            if (!comp2(*first2, *first1))
            {
                *d_first++ = *first1++;
            }
            ++first2;
        }
    }
}

// Return the ids of all non-empty buckets whose attribute sets contain all of
// 'all', at least one of 'any' (if non-empty), and none of 'none'.
inline flatcache::set<BucketId> PathToAttributesMap::findBuckets(const set<AttrNameAndType>& all,
                                                                 const set<AttrNameAndType>& any,
                                                                 const set<AttrNameAndType>& none) const
{
    flatcache::set<BucketId> retval;
    retval.reserve(256);
    // TODO: Do this in a less brute-force way
    for (auto& bucketAndId : attrNameSetToBucketId)
    {
        const Bucket& bucketTypes = bucketAndId.first;
        BucketId bucketId = bucketAndId.second;
        bool bucketEmpty = getElementCount(bucketId) == 0;
        if (bucketEmpty)
            continue;
        std::vector<AttrNameAndType> allTypesPresent;
        std::vector<AttrNameAndType> anyTypesPresent;
        std::vector<AttrNameAndType> noneTypesPresent;
        set_intersection2(
            all.begin(), all.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(allTypesPresent));
        set_intersection2(
            any.begin(), any.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(anyTypesPresent));
        set_intersection2(
            none.begin(), none.end(), bucketTypes.begin(), bucketTypes.end(), std::back_inserter(noneTypesPresent));
        bool allOfAllTypesPresent = (allTypesPresent.size() == all.size());
        bool oneOfAnyTypesPresent = (any.size() == 0) || (anyTypesPresent.size() != 0);
        bool noneOfNoneTypesPresent = (noneTypesPresent.size() == 0);
        if (allOfAllTypesPresent && oneOfAnyTypesPresent && noneOfNoneTypesPresent)
        {
            retval.v.push_back(bucketId);
        }
    }
    // Sort the vector to make it a flatcache::set
    std::sort(retval.begin(), retval.end());
    return retval;
}

// Resolve a path to its (bucket, element index) pair, or
// {kInvalidBucketId, kInvalidArrayIndex} when the path is absent.
inline std::tuple<BucketId, ArrayIndex> PathToAttributesMap::getBucketAndArrayIndex(const PathC& path) const
{
    const std::pair<BucketId, ArrayIndex>* bucketAndElem;
    if (pathToBucketElem.find(path, &bucketAndElem))
    {
        const BucketId& bucketId = bucketAndElem->first;
        const ArrayIndex& arrayIndex = bucketAndElem->second;
        return { bucketId, arrayIndex };
    }
    else
    {
        // Commenting out the error in 104.2 as there is no hasPrim API in 104.2 yet
        // and FabricSD needs to check for existence of a prim without causing an error to be logged
        // CARB_LOG_ERROR_ONCE("getBucketAndArrayIndex called on non-existent path '%s'\n", Path(path).getText());
        return { kInvalidBucketId, kInvalidArrayIndex };
    }
}

// Attribute name/type set of the bucket containing the given path.
// Returns an empty Bucket (with a warning) when the path is unknown.
inline Bucket PathToAttributesMap::getTypes(const PathC& path) const
{
    const std::pair<BucketId, ArrayIndex>* bucketAndElem;
    if (pathToBucketElem.find(path, &bucketAndElem))
    {
        const BucketImpl* bucketImpl = buckets.find(bucketAndElem->first);
        if (bucketImpl)
        {
            return bucketImpl->GetBucket();
        }
    }
    CARB_LOG_WARN_ONCE("getTypes called on non-existent path %s\n", Path(path).getText());
    return Bucket();
}

// Number of attributes on the bucket containing the given path (0 on error).
inline size_t PathToAttributesMap::getAttributeCount(const PathC& path) const
{
    const std::pair<BucketId, ArrayIndex>* bucketAndElem;
    if (pathToBucketElem.find(path, &bucketAndElem))
    {
        const BucketImpl* bucketImpl = buckets.find(bucketAndElem->first);
        if (bucketImpl)
        {
            return bucketImpl->GetBucket().size();
        }
    }
    CARB_LOG_ERROR_ONCE("getAttributeCount called on non-existent path %s\n", Path(path).getText());
    return 0;
}

// Type of a named attribute at a path; kUnknownType when path/attribute is
// missing. Checks the scalar map first, then the array map.
inline TypeC PathToAttributesMap::getType(const PathC& path, const TokenC& attrName) const
{
    APILOGGER("getType", apiLogEnabled, path, attrName);
    const std::pair<BucketId, ArrayIndex>* bucketAndElem;
    if (!pathToBucketElem.find(path, &bucketAndElem))
    {
        CARB_LOG_WARN_ONCE("getTfType called on non-existent path %s\n", Path(path).getText());
        return kUnknownType;
    }
    const BucketId &bucketId = bucketAndElem->first;
    const BucketImpl* bucketImplPtr = buckets.find(bucketId);
    if (!bucketImplPtr)
    {
        CARB_LOG_WARN_ONCE(
            "getTfType called on non-existent bucket pathAndBucketElem is broken %s\n", Path(path).getText());
        return kUnknownType;
    }
    const AttrName name{ attrName, NameSuffix::none };
    const ScalarAttributeArray *scalarAttributeArray;
    if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
    {
        return scalarAttributeArray->type;
    }
    else
    {
        const ArrayAttributeArray *arrayAttributeArray;
        if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
        {
            return arrayAttributeArray->values.type;
        }
    }
    CARB_LOG_WARN_ONCE("getType called on non-existent attribute %s %s\n", Path(path).getText(), Token(attrName).getText());
    return kUnknownType;
}

// Return 1 if attribute is present at path, 0 otherwise
inline size_t PathToAttributesMap::count(const PathC& path, const TokenC& attrName) const
{
    bool present;      // Whether this path has a bucket
    BucketId bucketId; // Pointer to the bucket if it does
    size_t element;    // Index corresponding to path in this bucket's arrays
    std::tie(present, bucketId, element) = getPresentAndBucketAndElement(path);
    if (!present)
    {
        return 0;
    }
    const BucketImpl* bucketImplPtr = buckets.find(bucketId);
    if (!bucketImplPtr)
        return 0;
    const AttrName name{ attrName, NameSuffix::none };
    const ScalarAttributeArray *scalarAttributeArray;
    if (bucketImplPtr->scalarAttributeArrays.find(name, &scalarAttributeArray))
    {
        return 1;
    }
    else
    {
        const ArrayAttributeArray *arrayAttributeArray;
        if (bucketImplPtr->arrayAttributeArrays.find(name, &arrayAttributeArray))
        {
            return 1;
        }
    }
    return 0;
}

// Copy one element of a scalar attribute array between buckets, propagating
// CPU/GPU/USD validity flags. No-op when the element types differ.
inline void PathToAttributesMap::moveElementScalarData(ScalarAttributeArray &destArray,
                                                       const size_t destElemIndex,
                                                       const ScalarAttributeArray &srcArray,
                                                       const size_t srcElemIndex)
{
    if (srcArray.type != destArray.type)
    {
        return;
    }
    const Typeinfo& typeinfo =
srcArray.typeinfo;
    const size_t size = typeinfo.size;
    const bool isArray = typeinfo.isArray;
    // Ideally usdValid would be driven by the change tracker
    // however I can't use that until we have a way to tie into that
    // Ideally there would be a subsscriber for "copying back to USD"
    // and that could be parsed to see if an element in invalid. At this point
    // we just have to do it per-attribute since that is as fine-grained as we
    // have data right now. This happens in moveElement to avoid having to repeat
    // logic about matching attriubte arrays that the function already does
    if (!srcArray.usdValid)
    {
        destArray.usdValid = srcArray.usdValid;
    }
    //
    // In the case where this is the first element in the new bucket
    // then the validity of the data needs to be moved from the old
    // bucket.
    //
    if (destElemIndex == 0)
    {
        destArray.cpuValid = srcArray.cpuValid;
        destArray.gpuValid = srcArray.gpuValid;
    }
    if (destArray.cpuValid && !srcArray.cpuValid)
    {
        // This should not happen because of makeSrcValidIfDestValid
        CARB_LOG_ERROR_ONCE("Invalid state while moving element: srcArray.cpuValid=%i destArray.cpuValid=%i", srcArray.cpuValid, destArray.cpuValid);
        assert(false);
    }
    if (destArray.gpuValid && !srcArray.gpuValid)
    {
        // This should not happen because of makeSrcValidIfDestValid
        CARB_LOG_ERROR_ONCE("Invalid state while moving element: srcArray.gpuValid=%i destArray.gpuValid=%i", srcArray.gpuValid, destArray.gpuValid);
        assert(false);
    }
    //
    // As noted above the validity of the src was already matched to the destinations needs
    // in the case, where for example the src has valid but the dest doesn't that is ok
    // but we do still move data, because in the case of array-of-array you get to at least
    // avoid another malloc.
    //
    if (srcArray.cpuValid || isArray)
    {
        // One-element byte copy within the CPU-side storage.
        uint8_t* destArrayData = destArray.cpuData();
        uint8_t* destPtr = destArrayData + destElemIndex * size;
        const uint8_t* srcArrayData = srcArray.cpuData();
        const uint8_t* srcPtr = srcArrayData + srcElemIndex * size;
        memcpy(destPtr, srcPtr, size);
    }
    if (srcArray.gpuValid)
    {
        if (isArray)
        {
            if (srcArray.gpuCapacity)
            {
                // Array-of-array case: size dest GPU storage to the source
                // capacity and copy the whole GPU buffer device-to-device.
                destArray.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, srcArray.gpuCapacity, typeinfo.size);
                platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, destArray.gpuArray, srcArray.gpuArray, srcArray.gpuCapacity, omni::gpucompute::MemcpyKind::deviceToDevice);
            }
        }
        else
        {
            // Scalar case: device-to-device copy of just this element.
            uint8_t* destArrayData = destArray.gpuArray;
            uint8_t* destPtr = destArrayData + destElemIndex * size;
            const uint8_t* srcArrayData = srcArray.gpuArray;
            const uint8_t* srcPtr = srcArrayData + srcElemIndex * size;
            platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, destPtr, srcPtr, size, omni::gpucompute::MemcpyKind::deviceToDevice);
        }
        destArray.gpuAllocedWithCuda = true;
    }
}

// Copy one element of an array-valued attribute. An ArrayAttributeArray is
// stored as five parallel MirroredArrays (values, elemCounts, cpuElemCounts,
// gpuElemCounts, gpuPtrs) — the static_assert guards that layout — and each
// is moved with moveElementScalarData.
inline void PathToAttributesMap::moveElementArrayData(ArrayAttributeArray &destArray, const size_t destElemIndex, const ArrayAttributeArray &srcArray, const size_t srcElemIndex)
{
    static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
    moveElementScalarData(destArray.values, destElemIndex, srcArray.values, srcElemIndex);
    moveElementScalarData(destArray.elemCounts, destElemIndex, srcArray.elemCounts, srcElemIndex);
    moveElementScalarData(destArray.cpuElemCounts, destElemIndex, srcArray.cpuElemCounts, srcElemIndex);
    moveElementScalarData(destArray.gpuElemCounts, destElemIndex, srcArray.gpuElemCounts, srcElemIndex);
    moveElementScalarData(destArray.gpuPtrs, destElemIndex, srcArray.gpuPtrs, srcElemIndex);
}

// Move one element between buckets (or within a bucket): every attribute
// array present in BOTH buckets has its element copied, then the path entry
// follows the element.
inline void PathToAttributesMap::moveElement(BucketImpl& destBucket, size_t destElemIndex, BucketImpl& srcBucket, size_t srcElemIndex)
{
    srcBucket.scalarAttributeArrays.forEach([this, &destBucket, &destElemIndex,
&srcElemIndex](const AttrName& name, const ScalarAttributeArray& srcArray) {
        // If bucket move is due to removal then one destTupleIndex will be
        // invalid. So we check for invalid here
        ScalarAttributeArray *destArray;
        if (destBucket.scalarAttributeArrays.find(name, &destArray))
        {
            moveElementScalarData(*destArray, destElemIndex, srcArray, srcElemIndex);
        }
    });
    srcBucket.arrayAttributeArrays.forEach([this, &destBucket, &destElemIndex, &srcElemIndex](const AttrName& name, const ArrayAttributeArray& srcArray) {
        // If bucket move is due to removal then one destTupleIndex will be
        // invalid. So we check for invalid here
        ArrayAttributeArray *destArray;
        if (destBucket.arrayAttributeArrays.find(name, &destArray))
        {
            moveElementArrayData(*destArray, destElemIndex, srcArray, srcElemIndex);
        }
    });
    // The path entry travels with the element.
    destBucket.elemToPath[destElemIndex] = std::move(srcBucket.elemToPath[srcElemIndex]);
}

// Remove the element at elemIndex from the given bucket using swap-with-last:
// the last element is moved into the vacated slot and all bookkeeping
// (change trackers, path-to-element map, per-attribute arrays) is updated.
// When destroyDataPointedTo is true the per-element CPU/GPU array payloads
// are freed as well; callers pass false when the element is being moved to
// another bucket that takes ownership of those payloads.
inline void PathToAttributesMap::destroyElement(BucketId bucketId, size_t elemIndex, bool destroyDataPointedTo)
{
    BucketImpl* srcBucketImplPtr = buckets.find(bucketId);
    if (!srcBucketImplPtr)
        return; // nothing to delete
    BucketImpl& srcBucketImpl = *srcBucketImplPtr;
    size_t elemCount = PathToAttributesMap::getElementCount(srcBucketImpl);
    if (elemCount == 0)
        return; // nothing to delete
    if (destroyDataPointedTo)
    {
        // Destruct element about to be overwritten
        srcBucketImpl.arrayAttributeArrays.forEach([this, &elemIndex](const AttrName& name, ArrayAttributeArray& array) {
            // If a CPU array has been allocated, delete it
            uint8_t** arrayCpuPtrArray = reinterpret_cast<uint8_t**>(array.values.cpuData());
            uint8_t*& cpuPtrToDelete = arrayCpuPtrArray[elemIndex];
            if (cpuPtrToDelete)
            {
                // Pinned host memory must be released through the GPU runtime;
                // plain allocations go through free().
                if (!USE_PINNED_MEMORY || !platform.gpuCuda)
                {
                    free(cpuPtrToDelete);
                }
                else if (platform.gpuCuda)
                {
                    platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuPtrToDelete);
                }
                cpuPtrToDelete = nullptr;
            }
            // If a GPU array has been allocated, delete it
            uint8_t** arrayGpuPtrArray = reinterpret_cast<uint8_t**>(array.gpuPtrs.cpuData());
removeLastElementFromMirroredArray = [this](MirroredArray& array) {
            if (array.count > 0)
            {
                const size_t newSize = array.size() - array.typeinfo.size;
                array.count--;
                array.resize(newSize);
            }
            if (array.gpuCapacity != 0)
            {
                array.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, array.size(), array.typeinfo.size);
            }
        };
        // Reduce element count
        srcBucketImpl.scalarAttributeArrays.forEach([this, &removeLastElementFromMirroredArray](const AttrName& name, ScalarAttributeArray& array) { removeLastElementFromMirroredArray(array); });
        srcBucketImpl.arrayAttributeArrays.forEach([this, &removeLastElementFromMirroredArray](const AttrName& name, ArrayAttributeArray& array) {
            // Shrink all five parallel arrays backing an array-valued attribute.
            removeLastElementFromMirroredArray(array.values);
            removeLastElementFromMirroredArray(array.elemCounts);
            removeLastElementFromMirroredArray(array.cpuElemCounts);
            removeLastElementFromMirroredArray(array.gpuElemCounts);
            removeLastElementFromMirroredArray(array.gpuPtrs);
        });
    }
    srcBucketImpl.elemToPath.pop_back();
    // If bucket has more than one element, remap path that pointed to last element
    if (!deletingLastElement)
    {
        std::pair<BucketId, ArrayIndex>* movedBucketAndElemIndex;
        if (pathToBucketElem.find(movedElemPath, &movedBucketAndElemIndex))
        {
            movedBucketAndElemIndex->second = elemIndex;
        }
        else
        {
            CARB_LOG_ERROR_ONCE("destroyElement attempted to re-index missing path %s\n", Path(movedElemPath).getText());
        }
    }
    // Update change trackers
    // We allocate them lazily, so we have to iterate over listenerIdToChangeTrackerConfig
    // then allocate bucketImpl.listenerIdToChanges if necessary
    auto bucketImpl = buckets.find(bucketId);
    if (bucketImpl)
    {
        listenerIdToChangeTrackerConfig.forEach([this, &bucketImpl, &elemIndex, &lastElemIndex, &deletingLastElement](ListenerId& listenerId, ChangeTrackerConfig& config) {
            if (config.changeTrackingEnabled)
            {
                // Allocate changes if necessary
                Changes* changes;
                if (bucketImpl->listenerIdToChanges.allocateEntry(listenerId, &changes))
                {
                    new (changes) Changes();
                }
                //
                // Since we may be moving within a bucket we need to inform the change tracker
                //
                if (!deletingLastElement)
                {
                    if (changes->addedIndices.contains(lastElemIndex))
                    {
                        // only need to track that it moved if we already
                        // cared about it
                        changes->addNewPrim(elemIndex);
                    }
                    changes->removePrim(lastElemIndex);
                }
                else
                {
                    changes->removePrim(elemIndex);
                }
            }
        });
    }
}

// Move the element for `path` from srcBucketId to destBucketId: allocate a
// slot in the destination, copy all matching attribute data across, delete
// the source slot (without freeing the array payloads, which the destination
// now owns), and carry the change-tracker dirty bits over.
inline void PathToAttributesMap::moveElementBetweenBuckets(const PathC& path, BucketId destBucketId, BucketId srcBucketId, const Bucket& destBucket)
{
    if (destBucketId == srcBucketId)
        return;
    // Get source BucketImpl
    BucketImpl* srcPtr = buckets.find(srcBucketId);
    if (!srcPtr)
    {
        CARB_LOG_ERROR("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find source bucket\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
        return;
    }
    // Get bucket and elem index
    std::pair<BucketId, ArrayIndex>* srcBucketAndElemIndex;
    if (!pathToBucketElem.find(path, &srcBucketAndElemIndex))
    {
        CARB_LOG_ERROR_ONCE("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find path\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
        return;
    }
    // Get dest BucketImpl
    BucketImpl* destPtr = buckets.find(destBucketId);
    if (!destPtr)
    {
        CARB_LOG_ERROR("moveElementBetweenBuckets failed to move path \"%s\" to from bucketId %zu to bucketId %zu, could not find destination bucket\n", Path(path).getText(), size_t(srcBucketId), size_t(destBucketId));
        return;
    }
    size_t destElemIndex = PathToAttributesMap::getElementCount(*destPtr);
    // Allocate element in new bucket
    allocElementForMove(*destPtr, *srcPtr, path);
    // Copy values from src to dest
    //
    // Ideally usdValid would be driven by the change tracker
    // however I can't use that until we have a way to tie into that
    // Ideally there would be a subsscriber for "copying back to USD"
    // and that could be parsed to see if an element in invalid. At this point
    // we just have to do it per-attribute since that is as fine-grained as we
    // have data right now. This happens in moveElement to avoid having to repeat
    // logic about matching attriubte arrays that the function already does
    moveElement(*destPtr, destElemIndex, *srcPtr, srcBucketAndElemIndex->second);
    // Delete element in old bucket
    // Don't destroy data pointed to, because we want dest element to point to it
    const bool destroyDataPointedTo = false;
    destroyElement(srcBucketId, srcBucketAndElemIndex->second, destroyDataPointedTo);
    // Map path to new bucket
    *srcBucketAndElemIndex = std::make_pair(destBucketId, destElemIndex);
    // Convert destBucket to a set<AttrNameAndType>
    set<AttrNameAndType> destBucket_v2;
    destBucket_v2.v.resize(destBucket.size());
    for (size_t i = 0; i != destBucket.size(); i++)
    {
        destBucket_v2.v[i] = AttrNameAndType(Type(destBucket.v[i].type), Token(destBucket.v[i].name), destBucket.v[i].suffix);
    }
    // Copy dirty bits to new bucket
    srcPtr->listenerIdToChanges.forEach([&destPtr, &destBucket_v2, &destElemIndex](const ListenerId &listener, const Changes& srcChanges) {
        // Create if listenerId doesn't exist on dest bucket
        Changes* destChanges;
        if (destPtr->listenerIdToChanges.allocateEntry(listener, &destChanges))
        {
            new (destChanges) Changes();
        }
        size_t changedAttrCount = srcChanges.changedAttributes.size();
        size_t destNewElemCount = destPtr->elemToPath.size();
        for (size_t i = 0; i != changedAttrCount; i++)
        {
            const AttrNameAndType& nameAndType = srcChanges.changedAttributes.v[i];
            // TODO: we could optimize this by taking advantage of destBucket_v2
            // and changeAttributes being sorted. This would allow us to iterate
            // through both at the same time, and avoid doing n O(log n) lookups.
if (destBucket_v2.contains(nameAndType))
            {
                // Only attributes that survived the move can be marked dirty
                // in the destination bucket.
                destChanges->setDirty(nameAndType, destElemIndex, destNewElemCount);
            }
        }
    });
}

// Convenience overload: add a scalar attribute with no name suffix.
inline void PathToAttributesMap::addAttributeC(const PathC& path, const TokenC& attrName, TypeC type, const void* value)
{
    addAttributeC(path, attrName, NameSuffix::none, type, value);
}

// Convenience overload: add an array-valued attribute with no name suffix.
inline void PathToAttributesMap::addArrayAttributeC(const PathC& path, const TokenC& attrName, TypeC type, const void* value, const size_t arrayElemCount)
{
    addArrayAttributeC(path, attrName, NameSuffix::none, type, value, arrayElemCount);
}

/**
 * @brief adds attributes to a primitive
 *
 * @details As opposed to addAttributeC this function allows the user to add multiple attributes to
 * a primitive at the same time and only does re-bucketing once after adding all of them.
 * This should be faster than adding them one-by-one and re-bucketing after each of them.
 * @param path - primitive path
 * @param attrNames - vector of the attribute names as tokens
 * @param typeCs - vector of identifiers for types, parallel to attrNames
 */
inline void PathToAttributesMap::addAttributesToPrim(const PathC& path, const std::vector<TokenC>& attrNames, const std::vector<TypeC>& typeCs)
{
    CARB_ASSERT(attrNames.size() == typeCs.size());
    addAttributesToBucket(path, attrNames, typeCs);
}

// Find bucketId the path is currently in
// Find the bucket from the bucketId
inline void PathToAttributesMap::addAttributesToBucket(const PathC& path, const std::vector<TokenC>& attrNames, const std::vector<TypeC>& typeCs)
{
    NameSuffix suffix = NameSuffix::none;
    BucketId srcBucketId;
    ArrayIndex srcElemIndex;
    std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    bool pathIsInFlatcache = (srcBucketId != kInvalidBucketId);
    if (!pathIsInFlatcache)
    {
        // Unknown path: register it first so it has a bucket to move from.
        addPath(path);
        std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    }
    // Get dest bucket
    // Dest bucket types = union(source bucket types, new type)
    std::pair<BucketId,
ArrayIndex>* bucketAndElemIndex;
    VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
    BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
    CARB_ASSERT(bucketImplPtr);
    Bucket destBucket = bucketImplPtr->GetBucket();
    for (uint32_t c = 0; c < attrNames.size(); ++c)
    {
        Token attrName(attrNames[c]);
        AttrNameAndType nameAndType(Type(typeCs[c]), attrName, NameSuffix::none);
        // Early out if attribute already present
        if (destBucket.find(nameAndType) != destBucket.end())
            continue;
        // When adding a new attribute that shadows the name of an existing
        // attribute, but with a new type, then we choose to drop the old attribute
        // on the floor.
        // Unfortunatly since we are searching on name since we will not know the type
        // we have to scan the list of attributes.
        for (const AttrNameAndType& bucketNameAndType : destBucket)
        {
            if (bucketNameAndType.name == attrNames[c] && bucketNameAndType.suffix == suffix && TypeC(bucketNameAndType.type) != typeCs[c])
            {
                // we can stop here since this enforces uniquness of attribute names
                // todo: check that USD already enforces this
                destBucket.erase(bucketNameAndType);
                break;
            }
        }
        destBucket.insert(nameAndType);
    }
    // A single re-bucketing move covers all the attributes added above.
    BucketId destBucketId = addBucket(destBucket);
    if (srcBucketId != destBucketId)
    {
        moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
    }
}

// Find bucketId the path is currently in
// Find the bucket from the bucketId
inline std::tuple<BucketId, ArrayIndex> PathToAttributesMap::addAttributeGetBucketAndArrayIndex(const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type)
{
    BucketId srcBucketId;
    ArrayIndex srcElemIndex;
    std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    bool pathIsInFlatcache = (srcBucketId != kInvalidBucketId);
    if (!pathIsInFlatcache)
    {
        // Unknown path: register it first so it has a bucket to move from.
        addPath(path);
        std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    }
    // Get dest bucket
    // Dest bucket types = union(source bucket types, new type)
    std::pair<BucketId, ArrayIndex>*
bucketAndElemIndex;
    VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
    BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
    CARB_ASSERT(bucketImplPtr);
    Bucket destBucket = bucketImplPtr->GetBucket();
    Token attrName(attrNameC);
    AttrNameAndType nameAndType(Type(type), attrName, nameSuffix);
    // Early out if attribute already present
    if (destBucket.find(nameAndType) != destBucket.end())
        return { srcBucketId, srcElemIndex };
    // When adding a new attribute that shadows the name of an existing
    // attribute, but with a new type, then we choose to drop the old attribute
    // on the floor.
    // Unfortunatly since we are searching on name since we will not know the type
    // we have to scan the list of attributes.
    for (const AttrNameAndType& bucketNameAndType : destBucket)
    {
        if (bucketNameAndType.name == attrNameC && bucketNameAndType.suffix == nameSuffix && TypeC(bucketNameAndType.type) != type)
        {
            // we can stop here since this enforces uniquness of attribute names
            // todo: check that USD already enforces this
            destBucket.erase(bucketNameAndType);
            break;
        }
    }
    destBucket.insert(nameAndType);
    BucketId destBucketId = addBucket(destBucket);
    size_t destElemIndex;
    if (srcBucketId != destBucketId)
    {
        // The element lands at the end of the destination bucket's arrays.
        destElemIndex = getElementCount(destBucket);
        moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
    }
    else
    {
        destElemIndex = srcElemIndex;
    }
    return { destBucketId, destElemIndex };
}

// Add a scalar attribute (with explicit name suffix); array-valued types must
// go through addArrayAttributeC instead when a default value is supplied.
inline void PathToAttributesMap::addAttributeC(const PathC& path, const TokenC& attrNameC, NameSuffix nameSuffix, TypeC type, const void* value)
{
    const Typeinfo& typeinfo = getTypeInfo(type);
    if (typeinfo.isArray && value)
    {
        CARB_LOG_ERROR("addAttributeC: Attempted to add array-value attribute with default values. Use addArrayAttribute instead.");
        return;
    }
    addAttributeInternal(path, attrNameC, nameSuffix, type, value, typeinfo, 0);
}

// Add an array-valued attribute with an initial element count (and optional
// initial data).
inline void PathToAttributesMap::addArrayAttributeC(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const void* value, const size_t arrayElemCount)
{
    const Typeinfo& typeinfo = getTypeInfo(type);
    addAttributeInternal(path, attrName, suffix, type, value, typeinfo, arrayElemCount);
}

// Ensure the attribute exists at path, mark it dirty for change tracking,
// and return a writable pointer to its element.
inline SpanC PathToAttributesMap::getOrCreateAttributeWrC(const PathC& path, const TokenC& attrName, TypeC type)
{
    APILOGGER("getOrCreateAttributeWrC", apiLogEnabled, path, attrName);
    BucketId bucketId;
    ArrayIndex elemIndex;
    std::tie(bucketId, elemIndex) = addAttributeGetBucketAndArrayIndex(path, attrName, NameSuffix::none, type);
    ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(bucketId, attrName, CpuReadWriteConfig(), NameSuffix::none);
    setArrayElementDirty(arrayAndchangedIndices, elemIndex);
    SpanC array = arrayAndchangedIndices.array;
    return getArrayElementPtr(array, elemIndex);
}

// Typed convenience wrapper over addAttributeC.
template <typename T>
void PathToAttributesMap::addAttribute(const PathC& path, const TokenC& attrName, TypeC type, const T& value)
{
    APILOGGER("addAttribute", apiLogEnabled, path, attrName);
    // TODO: check that type is compatible
    return addAttributeC(path, attrName, type, &value);
}

// Typed convenience wrapper over the suffix-aware addAttributeC overload.
template <typename T>
void PathToAttributesMap::addSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix, TypeC type, const T& value)
{
    APILOGGER("addSubAttribute", apiLogEnabled, path, attrName);
    // TODO: check that type is compatible
    return addAttributeC(path, attrName, suffix, type, &value);
}

// Return a new bucket with all sub-attributes of a single attribute removed
inline Bucket removeAllSubAttributesFromBucket(const Bucket& bucket, const TokenC& attrName)
{
    Bucket newBucket;
    // TODO: implement set::delete
    for (auto nameAndType : bucket)
    {
        // Don't compare suffix and type to delete all suffix variants of name
        if (nameAndType.name != attrName)
{
            newBucket.insert(nameAndType);
        }
    }
    return newBucket;
}

// Return a new bucket with all sub-attributes of all named attributes removed
inline Bucket removeAllSubAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames)
{
    Bucket newBucket;
    for (auto nameAndType : bucket)
    {
        // Keep only entries whose name matches none of the names to remove.
        if (std::find_if(attrNames.begin(), attrNames.end(), [&](auto attrName) { return nameAndType.name == attrName; }) == attrNames.end())
        {
            newBucket.insert(nameAndType);
        }
    }
    return newBucket;
}

// Return a new bucket with exactly one (name, suffix) pair removed.
inline Bucket removeSubAttributeFromBucket(const Bucket& bucket, const TokenC& attrName, NameSuffix suffix)
{
    Bucket newBucket;
    // TODO: implement set::delete
    for (auto nameAndType : bucket)
    {
        // Don't compare suffix and type to delete all suffix variants of name
        if (!(nameAndType.name == attrName && nameAndType.suffix == suffix))
        {
            newBucket.insert(nameAndType);
        }
    }
    return newBucket;
}

// Remove an attribute and all its subattributes (suffixes).
inline void PathToAttributesMap::removeAttribute(const PathC& path, const TokenC& attrName)
{
    APILOGGER("removeAttribute", apiLogEnabled, path, attrName);
    BucketId srcBucketId;
    ArrayIndex srcElemIndex;
    std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    if (srcBucketId == kInvalidBucketId)
        return;
    // srcBucketId != kInvalidBucketId guarantees find will succeed
    std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
    VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
    BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
    CARB_ASSERT(bucketImplPtr);
    const Bucket& srcTypes = bucketImplPtr->GetBucket();
    // Removing an attribute re-buckets the path into the narrower bucket.
    const Bucket destBucket = removeAllSubAttributesFromBucket(srcTypes, attrName);
    const BucketId destBucketId = addBucket(destBucket);
    moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}

// Remove several attributes (and their subattributes) from one path with a
// single re-bucketing move.
inline void PathToAttributesMap::removeAttributesFromPath(const PathC& path, const std::vector<TokenC>& attrNames)
{
    BucketId srcBucketId;
    ArrayIndex srcElemIndex;
    std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    if (srcBucketId == kInvalidBucketId)
        return;
    // srcBucketId != kInvalidBucketId guarantees find will succeed
    std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
    VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
    BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
    CARB_ASSERT(bucketImplPtr);
    const Bucket& bucket = bucketImplPtr->GetBucket();
    const Bucket destBucket = removeAllSubAttributesFromBucket(bucket, attrNames);
    const BucketId destBucketId = addBucket(destBucket);
    if (srcBucketId != destBucketId)
    {
        moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
    }
}

// Remove the named attributes from every path in the given bucket.
inline void PathToAttributesMap::removeAttributesFromBucket(const Bucket& bucket, const std::vector<TokenC>& attrNames)
{
    // first we need to find the actual bucketImpl that we will be
    // deleting attributes from
    auto iter = attrNameSetToBucketId.find(bucket);
    bool found = (iter != attrNameSetToBucketId.end());
    if (!found)
        return;
    BucketId bucketId = iter->second;
    BucketImpl* bucketImplPtr = buckets.find(bucketId);
    if (!bucketImplPtr)
        return;
    // Buckets are found based on a set of the attributes, so we build
    // the new set based on the attribute names
    Bucket newBucket = removeAllSubAttributesFromBucket(bucket, attrNames);
    std::pair<BucketId, BucketImpl&> newBucketIdAndImpl = findOrCreateBucket(newBucket);
    BucketImpl& newBucketImpl = newBucketIdAndImpl.second;
    const size_t origSize = newBucketImpl.elemToPath.size();
    if (origSize == 0)
    {
        // In the case where it is a new bucket we prefer just deleting the no longer needed arrays.
newBucketImpl = *bucketImplPtr;
        newBucketImpl.SetBucket(std::move(newBucket));
        // loop finding and deleting attribute arrays
        for (auto attrName : attrNames)
        {
            newBucketImpl.scalarAttributeArrays.forEach([this, &attrName, &newBucketImpl](const AttrName& name, ScalarAttributeArray& array) {
                if (name.name == attrName)
                {
                    newBucketImpl.scalarAttributeArrays.freeEntry(name);
                }
            });
            newBucketImpl.arrayAttributeArrays.forEach([this, &attrName, &newBucketImpl](const AttrName& name, ArrayAttributeArray& array) {
                if (name.name == attrName)
                {
                    newBucketImpl.arrayAttributeArrays.freeEntry(name);
                }
            });
        }
    }
    else
    {
        // Destination bucket already has elements: fall back to moving each
        // path across individually.
        // TODO : there should be a faster way to do this but more to discuss here later
        for (const auto path : bucketImplPtr->elemToPath)
        {
            moveElementBetweenBuckets(asInt(path), newBucketIdAndImpl.first, bucketId, newBucket);
        }
    }
    //
    // need to update the pathToBucketElem for all the items that just "moved" buckets
    // TODO: make this work with moving buckets
    for (size_t i = origSize; i < newBucketImpl.elemToPath.size(); ++i)
    {
        std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
        pathToBucketElem.allocateEntry(asInt(newBucketImpl.elemToPath[i]), &bucketAndElemIndex);
        *bucketAndElemIndex = std::make_pair(newBucketIdAndImpl.first, i);
    }
    // The old bucket is now empty; drop it from both lookup structures.
    buckets.erase(bucketId);
    attrNameSetToBucketId.erase(bucket);
}

// Remove a particular (name,suffix) pair, for example the connection of an attribute
inline void PathToAttributesMap::removeSubAttribute(const PathC& path, const TokenC& attrName, NameSuffix suffix)
{
    APILOGGER("removeSubAttribute", apiLogEnabled, path, attrName);
    BucketId srcBucketId;
    ArrayIndex srcElemIndex;
    std::tie(srcBucketId, srcElemIndex) = getBucketAndArrayIndex(path);
    if (srcBucketId == kInvalidBucketId)
        return;
    // srcBucketId != kInvalidBucketId guarantees find will succeed
    std::pair<BucketId, ArrayIndex>* bucketAndElemIndex;
    VALIDATE_TRUE(pathToBucketElem.find(path, &bucketAndElemIndex));
    BucketImpl *const bucketImplPtr = buckets.find(bucketAndElemIndex->first);
    CARB_ASSERT(bucketImplPtr);
    const Bucket& srcTypes = bucketImplPtr->GetBucket();
    bool pathFound = (srcBucketId != kInvalidBucketId);
    if (!pathFound)
    {
        CARB_LOG_ERROR_ONCE("removeSubAttribute called on non-existent path %s \n", Path(path).getText());
        return;
    }
    const Bucket destBucket = removeSubAttributeFromBucket(srcTypes, attrName, suffix);
    const BucketId destBucketId = addBucket(destBucket);
    moveElementBetweenBuckets(path, destBucketId, srcBucketId, destBucket);
}

// Removes an attribute (and all its subattributes) for all paths in a bucket
inline void PathToAttributesMap::removeAttributeC(const Bucket& bucket, const TokenC& attrName, TypeC type)
{
    APILOGGER("removeAttributeC", apiLogEnabled, attrName);
    // first we need to find the actual bucketImpl that we will be
    // deleting attributes from
    auto iter = attrNameSetToBucketId.find(bucket);
    bool found = (iter != attrNameSetToBucketId.end());
    if (!found)
        return;
    BucketId bucketId = iter->second;
    BucketImpl* bucketImplPtr = buckets.find(bucketId);
    if (!bucketImplPtr)
        return;
    // Buckets are found based on a set of the attributes, so we build
    // the new set based on the attribute names
    Bucket newBucket = removeAllSubAttributesFromBucket(bucket, attrName);
    BucketId newBucketId = findBucketId(newBucket);
    if (newBucketId == kInvalidBucketId)
    {
        // In the case where it is a new bucket we prefer just deleting the no longer needed arrays.
// This means that nothing needs to be updated as all the prims are in the same place bucketImplPtr->SetBucket(std::move(newBucket)); if (getTypeInfo(type).isArray) { bucketImplPtr->arrayAttributeArrays.forEach([this, &attrName, bucketImplPtr](const AttrName& name, ArrayAttributeArray& array) { if (name.name == attrName) { bucketImplPtr->arrayAttributeArrays.freeEntry(name); } }); } else { bucketImplPtr->scalarAttributeArrays.forEach([this, &attrName, bucketImplPtr](const AttrName& name, ScalarAttributeArray& array) { if (name.name == attrName) { bucketImplPtr->scalarAttributeArrays.freeEntry(name); } }); } attrNameSetToBucketId[newBucket] = bucketId; attrNameSetToBucketId.erase(bucket); } else { std::pair<BucketId, BucketImpl&> newBucketIdAndImpl = findOrCreateBucket(newBucket); BucketImpl& newBucketImpl = newBucketIdAndImpl.second; const size_t origSize = newBucketImpl.elemToPath.size(); // TODO : there should be a faster way to do this but more to discuss here later // currently this pulls from the front, this ensures that elements stay in the same // "order" when moving buckets -> but this means moving stuff around in elemToPath // Doing this in "bulk" will make all of this better. 
while(bucketImplPtr->elemToPath.size()) { moveElementBetweenBuckets( asInt(bucketImplPtr->elemToPath.front()), newBucketIdAndImpl.first, bucketId, newBucket); } // // need to update the pathToBucketElem for all the items that just "moved" buckets // TODO: make this work with moving buckets for (size_t i = origSize; i < newBucketImpl.elemToPath.size(); ++i) { std::pair<BucketId, ArrayIndex>* bucketAndElemIndex; pathToBucketElem.allocateEntry(asInt(newBucketImpl.elemToPath[i]), &bucketAndElemIndex); *bucketAndElemIndex = std::make_pair(newBucketIdAndImpl.first, i); } } } inline void PathToAttributesMap::removePath(const PathC& path) { std::pair<BucketId, ArrayIndex>* bucketAndElemIndex; if (!pathToBucketElem.find(path, &bucketAndElemIndex)) { CARB_LOG_ERROR_ONCE("removePath called on non-existent path %s \n", Path(path).getText()); return; } const BucketId &bucketId = bucketAndElemIndex->first; const size_t &elemIndex = bucketAndElemIndex->second; const bool destroyDataPointedTo = true; destroyElement(bucketId, elemIndex, destroyDataPointedTo); pathToBucketElem.freeEntry(path); } inline size_t PathToAttributesMap::count(const PathC& path) const { const std::pair<BucketId, ArrayIndex>* bucketAndElemIndex; if (pathToBucketElem.find(path, &bucketAndElemIndex)) { return 1; } else { return 0; } } struct ViewIterator { size_t bucketIndex = 0; size_t elementIndex = 0; std::vector<size_t>::const_iterator bucketElemCount; ViewIterator& operator++() { elementIndex++; if (elementIndex == *bucketElemCount) { bucketIndex++; bucketElemCount++; elementIndex = 0; } return *this; } bool operator!=(const ViewIterator& rhs) const { return bucketIndex != rhs.bucketIndex || elementIndex != rhs.elementIndex; } ViewIterator& operator*() { return *this; } }; // Array resize that does not preserve previous data inline void PathToAttributesMap::destructiveResizeIfNecessaryGPU(MirroredArray& gpuPointerArray, size_t elem, size_t& capacity, size_t desiredElemCount, size_t elemByteCount, 
                                                                 omni::gpucompute::GpuCompute* computeAPI,
                                                                 omni::gpucompute::Context* computeCtx)
{
    // gpuPointerArray holds, on the CPU side, one GPU pointer per element.
    uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuPointerArray.cpuData());
    uint8_t*& gpuData = elemToArrayGpuData[elem];

    // Reallocate when the recorded capacity differs from the request, or nothing is
    // allocated yet. NOTE(review): the original comment said "iff capacity < desiredElemCount",
    // but the code also reallocates on shrink — confirm which is intended.
    if (capacity != desiredElemCount || gpuData == nullptr)
    {
        size_t byteCount = desiredElemCount * elemByteCount;
        if (gpuData)
        {
            // NOTE(review): the free always goes through platform.gpuCuda while the allocation
            // below uses the caller-supplied computeAPI — confirm these are always the same API.
            platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuData);
        }
        gpuData = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemByteCount));

        // We've written to gpuData on the CPU so we have to invalidate any GPU mirror of it
        gpuPointerArray.gpuValid = false;
        gpuPointerArray.cpuValid = true;
        capacity = desiredElemCount;
    }
}

// Array resize that preserves previous data
inline void PathToAttributesMap::resizeIfNecessaryGPU(MirroredArray& gpuPointerArray,
                                                      size_t elem,
                                                      size_t& capacity,
                                                      size_t desiredElemCount,
                                                      size_t elemByteCount,
                                                      omni::gpucompute::GpuCompute* computeAPI,
                                                      omni::gpucompute::Context* computeCtx)
{
    // TODO: reduce number of reallocations by allocating capacity larger than size
    // and not always reallocating when desiredElemCount<capacity
    // if gpuCapacity is 0 that means the array was recently copied and needs to be reallocated
    if(computeAPI && (capacity != desiredElemCount || gpuPointerArray.gpuCapacity == 0))
    {
        size_t oldByteCount = capacity * elemByteCount;
        size_t newByteCount = desiredElemCount * elemByteCount;
        uint8_t** elemToArrayGpuData = reinterpret_cast<uint8_t**>(gpuPointerArray.cpuData());
        uint8_t*& gpuData = elemToArrayGpuData[elem];
        // Allocate the new buffer first, then copy the overlapping prefix of the old data.
        uint8_t* newGpuData = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, newByteCount, elemByteCount));
        if (gpuData)
        {
            using omni::gpucompute::MemcpyKind;
            size_t copyByteCount = std::min(oldByteCount, newByteCount);
            computeAPI->memcpy(*computeCtx, newGpuData, gpuData, copyByteCount, MemcpyKind::deviceToDevice);
            // Note that this free has to be async even though the previous
            // memcpy is sync. The reason is that deviceToDevice memcpys
            // are always async, even if you call the sync version of cudaMemcpy.
            // So if you do "sync" memcpy and then sync free, the free can
            // execute on the CPU before the copy executes on the GPU.
            // See https://nvidia-omniverse.atlassian.net/browse/OM-46051
            computeAPI->freeAsync(*computeCtx, gpuData);
        }
        gpuData = newGpuData;

        // We've written to gpuData on the CPU so we have to invalidate any GPU mirror of it
        gpuPointerArray.gpuValid = false;
        gpuPointerArray.cpuValid = true;
        capacity = desiredElemCount;
    }
}

// This function is called when we are just about to do a transfer to GPU, to
// make sure that GPU array is large enough.
// It is called only when !gpuValid, so we don't have to preserve any existing
// GPU array.
//
// Algorithm:
// If capacity is sufficient, do nothing
// If not, free any existing allocation, then allocate
inline void PathToAttributesMap::allocGpuMemIfNecessary(PathToAttributesMap::MirroredArray& array,
                                                        size_t byteCount,
                                                        size_t elemSize,
                                                        omni::gpucompute::GpuCompute* computeAPI,
                                                        omni::gpucompute::Context* computeCtx)
{
    bool capacitySufficient = (byteCount <= array.gpuCapacity);
    if (!capacitySufficient)
    {
        if (array.gpuArray)
        {
            computeAPI->freeAsync(*computeCtx, array.gpuArray);
        }
        array.gpuArray = reinterpret_cast<uint8_t*>(computeAPI->mallocAsync(*computeCtx, byteCount, elemSize));
        array.gpuCapacity = byteCount;
    }
}

// Collect, for the given listener, the per-bucket change sets accumulated since
// the last popChanges(): which attributes changed (and at which element indices)
// and which prims were added. Buckets with no changes are skipped.
inline PrimBucketListImpl PathToAttributesMap::getChanges(ListenerId listenerId)
{
    PrimBucketListImpl changesOut;

    // For now, iterate over all buckets
    // We'll probably want the user to specify a subset of buckets for change logging
    // NOTE(review): buckets.end() is used here as an element count (also as the loop
    // bound below), not as an iterator — confirm that is this container's contract.
    changesOut.buckets.reserve(buckets.end());
    changesOut.changes.reserve(buckets.end());
    BucketId id{ 0 };
    for (unsigned int i = 0; i < buckets.end(); ++i, ++id)
    {
        BucketImpl* bucketPtr = buckets.find(id);
        if (!bucketPtr)
            continue;
        BucketImpl& bucketImpl = *bucketPtr;
        BucketId bucketId = id;
        Changes* changesIn;
        if (!bucketImpl.listenerIdToChanges.find(listenerId, &changesIn))
        {
            continue;
        }
        size_t changedAttrCount = changesIn->changedAttributes.size();
        size_t primCount = bucketImpl.elemToPath.size();
        bool attributesChanged = (changedAttrCount != 0 && primCount != 0);
        bool primsAdded = (changesIn->getNewPrimCount() != 0);
        if (attributesChanged || primsAdded)
        {
            changesOut.buckets.v.push_back(bucketId);
            changesOut.changes.push_back(BucketChangesImpl());
            BucketChangesImpl& bucketChanges = changesOut.changes.back();

            // Write changed attributes
            bucketChanges.changedAttributes = changesIn->changedAttributes;
            bucketChanges.changedIndices.resize(changedAttrCount);
            for (size_t j = 0; j != changedAttrCount; j++)
            {
                // Spans point into the listener's Changes storage; they stay valid until popChanges().
                bucketChanges.changedIndices[j] = { changesIn->changedIndices[j].allIndicesChanged,
                                                    { changesIn->changedIndices[j].changedIndices.data(),
                                                      changesIn->changedIndices[j].changedIndices.size() } };
            }
            bucketChanges.pathArray = { reinterpret_cast<const Path*>(bucketImpl.elemToPath.data()),
                                        bucketImpl.elemToPath.size() };

            // Write added prims
            bucketChanges.addedIndices = { changesIn->addedIndices.data(), changesIn->addedIndices.size() };
        }
    }
    return changesOut;
}

// Clear the accumulated change sets for a listener across all buckets,
// creating an (empty) change entry for the listener where none exists yet.
inline void PathToAttributesMap::popChanges(ListenerId listenerId)
{
    BucketId id{ 0 };
    for (unsigned int i = 0; i < buckets.end(); ++i, ++id)
    {
        BucketImpl* bucketPtr = buckets.find(id);
        if (bucketPtr)
        {
            // Create listenerId if it doesn't exist
            Changes* changes;
            if (bucketPtr->listenerIdToChanges.allocateEntry(listenerId, &changes))
            {
                new (changes) Changes;
            }
            changes->changedAttributes.clear();
            changes->changedIndices.clear();
            changes->addedIndices.clear();
        }
    }
}

// MirroredArray: a CPU/GPU mirrored buffer. A fresh array starts with the USD
// mirror marked valid and both CPU and GPU mirrors invalid.
inline PathToAttributesMap::MirroredArray::MirroredArray(Platform& platform_, const TypeC &type, const Typeinfo& typeinfo) noexcept
    : cpuArray()
    , platform(platform_)
    , type(type)
    , typeinfo(typeinfo)
    , gpuArray(nullptr)
    , gpuCapacity(0)
    , d3dArrays()
    , count(0)
    , usdValid(true)
    , cpuValid(false)
    , gpuValid(false)
    , gpuAllocedWithCuda(false)
    , attributeMutex()
{
}

inline
// Destructor: releases the GPU-side allocation (if any). The CPU buffer frees
// itself; D3D/Vulkan cleanup is currently disabled (see note below).
PathToAttributesMap::MirroredArray::~MirroredArray()
{
    // clean up any non-array gpu data
    if (gpuArray)
    {
        if (gpuAllocedWithCuda)
        {
            platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuArray);
        }
        else
        {
            // @TODO Fix crash during D3dVk free! N.B. that this backend is incomplete and not in active use
            // gpuD3dVk->freeAsync(*gpuD3dVkCtx, gpuArray);
        }
        gpuArray = nullptr;
    }
    gpuValid = false;
}

// Copy assignment. For array-of-array data only the CPU pointer table is sized
// (pointers nulled); the per-element allocations and data are copied later by
// PathToAttributesMap's own operator=. GPU state is deliberately NOT copied.
inline PathToAttributesMap::MirroredArray& PathToAttributesMap::MirroredArray::operator=(const MirroredArray& other) noexcept
{
    if (!other.isArrayOfArray())
    {
        cpuArray = other.cpuArray;
    }
    else
    {
        // Here we set all pointers in dest array to nullptr
        // The allocation and data copy happens in PathToAttributesMap's operator=
        cpuArray.resize(other.size());
        uint8_t** destPtrs = reinterpret_cast<uint8_t**>(cpuData());
        for (size_t elemIndex = 0; elemIndex != other.count; elemIndex++)
        {
            destPtrs[elemIndex] = nullptr;
        }
    }
    // platform = other.platform; // intentionally not copy-assigning platform
    type = other.type;
    typeinfo = other.typeinfo;
    usdValid = other.usdValid;
    cpuValid = other.cpuValid;
    count = other.count;

    // GPU data needs to be copied explicitly using the gpu compute API
    gpuArray = nullptr;
    gpuCapacity = 0;
    gpuValid = false;
    gpuAllocedWithCuda = false;
    if (other.gpuValid)
    {
        // Also need to empty the cpuArray as it is a cpu pointer to now-invalid GPU data
        // NOTE(review): this nulling treats cpuData() as a pointer table, but it runs even
        // when !other.isArrayOfArray() (where cpuArray holds scalar values just copied
        // above) — confirm scalar arrays with gpuValid aren't corrupted here.
        uint8_t** destPtrs = reinterpret_cast<uint8_t**>(cpuData());
        for (size_t elemIndex = 0; elemIndex != other.count; elemIndex++)
        {
            destPtrs[elemIndex] = nullptr;
        }
    }
    return *this;
}

// Move constructor: steals the CPU buffer and GPU pointer; the source's
// gpuArray is nulled so its destructor won't double-free.
inline PathToAttributesMap::MirroredArray::MirroredArray(MirroredArray&& other) noexcept
    : cpuArray(std::move(other.cpuArray)),
      platform(other.platform),
      type(other.type),
      typeinfo(other.typeinfo),
      gpuArray(other.gpuArray),
      gpuCapacity(other.gpuCapacity),
      d3dArrays(std::move(other.d3dArrays)),
      count(other.count),
      usdValid(other.usdValid),
      cpuValid(other.cpuValid),
      gpuValid(other.gpuValid),
      gpuAllocedWithCuda(other.gpuAllocedWithCuda),
      attributeMutex() // intentionally not move constructing the mutex
{
    other.gpuArray = nullptr;
}

// Move assignment via move-construct-and-swap.
inline PathToAttributesMap::MirroredArray& PathToAttributesMap::MirroredArray::operator=(MirroredArray&& other) noexcept
{
    MirroredArray tmp(std::move(other));
    swap(*this, tmp);
    return *this;
}

// Member-wise swap of two MirroredArrays. The platform reference and the
// attribute mutex are intentionally left untouched.
inline void swap(PathToAttributesMap::MirroredArray& a, PathToAttributesMap::MirroredArray& b) noexcept
{
    using std::swap;
    swap(a.cpuArray, b.cpuArray);
    swap(a.type, b.type);
    swap(a.typeinfo, b.typeinfo);
    swap(a.gpuArray, b.gpuArray);
    swap(a.gpuCapacity, b.gpuCapacity);
    swap(a.d3dArrays, b.d3dArrays);
    swap(a.count, b.count);
    swap(a.usdValid, b.usdValid);
    swap(a.cpuValid, b.cpuValid);
    swap(a.gpuValid, b.gpuValid);
    swap(a.gpuAllocedWithCuda, b.gpuAllocedWithCuda);
    // swap(a.attributeMutex, b.attributeMutex); // intentionally NOT swapping attribute mutex because it is not move-constructable
}

// ArrayAttributeArray bundles five MirroredArrays: per-element value pointers,
// element counts, CPU capacities, GPU element counts, and GPU pointers.
inline PathToAttributesMap::ArrayAttributeArray::ArrayAttributeArray(Platform& platform_, const TypeC& type, const Typeinfo& typeinfo) noexcept
    : values(platform_, type, typeinfo)
    , elemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{sizeof(PTAM_SIZE_TYPE), false, 0})
    , cpuElemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 })
    , gpuElemCounts(platform_, PTAM_SIZE_TYPEC, Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 })
    , gpuPtrs(platform_, PTAM_POINTER_TYPEC, Typeinfo{ sizeof(PTAM_POINTER_TYPE), false, 0 })
{
    static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
}

// Destructor: frees every per-element CPU allocation (plain or pinned host
// memory) and every per-element GPU allocation.
inline PathToAttributesMap::ArrayAttributeArray::~ArrayAttributeArray()
{
    Platform& platform = values.platform;
    CARB_ASSERT(&platform == &elemCounts.platform);
    CARB_ASSERT(&platform == &cpuElemCounts.platform);
    CARB_ASSERT(&platform == &gpuElemCounts.platform);
    CARB_ASSERT(&platform == &gpuPtrs.platform);

    if (values.count)
    {
        uint8_t** elemToCpuPtr = reinterpret_cast<uint8_t**>(values.cpuData());
        for (size_t elemIndex = 0; elemIndex != values.count; elemIndex++)
        {
            // If a CPU array has been allocated, free it
            uint8_t*& cpuPtrToDelete = elemToCpuPtr[elemIndex];
            if (cpuPtrToDelete)
            {
                // Pinned (page-locked) host memory must be released through CUDA.
                if (!USE_PINNED_MEMORY || !platform.gpuCuda)
                {
                    free(cpuPtrToDelete);
                }
                else if (platform.gpuCuda)
                {
                    platform.gpuCuda->freeHost(*platform.gpuCudaCtx, cpuPtrToDelete);
                }
                cpuPtrToDelete = nullptr;
                values.cpuValid = false;
            }
        }
    }
    if (gpuPtrs.count)
    {
        // CPU array of GPU pointers
        uint8_t** elemToGpuPtr = reinterpret_cast<uint8_t**>(gpuPtrs.cpuData());
        for (size_t elemIndex = 0; elemIndex != gpuPtrs.count; elemIndex++)
        {
            // If a GPU array has been allocated, free it
            uint8_t*& gpuPtrToDelete = elemToGpuPtr[elemIndex];
            if (gpuPtrToDelete)
            {
                CARB_ASSERT(platform.gpuCuda);
                CARB_ASSERT(platform.gpuCudaCtx);
                platform.gpuCuda->freeAsync(*platform.gpuCudaCtx, gpuPtrToDelete);
                gpuPtrToDelete = nullptr;
            }
        }
    }
}

// Copy assignment: member-wise copy of the five MirroredArrays (each with the
// copy semantics documented on MirroredArray::operator=).
inline PathToAttributesMap::ArrayAttributeArray& PathToAttributesMap::ArrayAttributeArray::operator=(const ArrayAttributeArray& other) noexcept
{
    static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
    values = other.values;
    elemCounts = other.elemCounts;
    cpuElemCounts = other.cpuElemCounts;
    gpuElemCounts = other.gpuElemCounts;
    gpuPtrs = other.gpuPtrs;
    return *this;
}

// Move constructor: moves the five MirroredArrays, then zeroes the source
// counts so its destructor won't walk (and free) the stolen element pointers.
inline PathToAttributesMap::ArrayAttributeArray::ArrayAttributeArray(ArrayAttributeArray&& other) noexcept
    : values(std::move(other.values))
    , elemCounts(std::move(other.elemCounts))
    , cpuElemCounts(std::move(other.cpuElemCounts))
    , gpuElemCounts(std::move(other.gpuElemCounts))
    , gpuPtrs(std::move(other.gpuPtrs))
{
    static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
    other.values.count = 0;
    other.elemCounts.count = 0;
    other.cpuElemCounts.count = 0;
    other.gpuElemCounts.count = 0;
    other.gpuPtrs.count = 0;
}

// Move assignment via move-construct-and-swap.
inline PathToAttributesMap::ArrayAttributeArray& PathToAttributesMap::ArrayAttributeArray::operator=(ArrayAttributeArray&& other) noexcept
{
    ArrayAttributeArray tmp(std::move(other));
    swap(*this, tmp);
    return *this;
}

// Member-wise swap of the five MirroredArrays.
inline void swap(PathToAttributesMap::ArrayAttributeArray& a, PathToAttributesMap::ArrayAttributeArray& b) noexcept
{
    using std::swap;
    swap(a.values, b.values);
    swap(a.elemCounts, b.elemCounts);
    swap(a.cpuElemCounts, b.cpuElemCounts);
    swap(a.gpuElemCounts, b.gpuElemCounts);
    swap(a.gpuPtrs, b.gpuPtrs);
}

// Debug print: dumps a bucket's (type, name, suffix) signature, then the byte
// sizes of its scalar and array attribute arrays if the bucket exists.
inline void PathToAttributesMap::printBucket(const Bucket& bucket) const
{
    bool multiLine = (1 < bucket.size());
    printf("{");
    if (multiLine)
        printf("\n");
    for (const auto& b : bucket)
    {
        TokenC attrName = b.name;
        NameSuffix suffix = b.suffix;
        Type type(b.type);
        if (multiLine)
            printf("    ");
        Token attrNameToken(attrName);
        std::cout << "TypeC(" << type << ") " << attrNameToken.getText() << suffix;
        if (multiLine)
            printf("\n");
    }
    printf("} ");

    // Print arrays in bucket
    auto iter = attrNameSetToBucketId.find(bucket);
    bool found = (iter != attrNameSetToBucketId.end());
    if (!found)
        return;
    BucketId bucketId = iter->second;
    auto bucketImplPtr = buckets.find(bucketId);
    if (bucketImplPtr)
    {
        const BucketImpl& bucketImpl = *bucketImplPtr;
        bucketImpl.scalarAttributeArrays.forEach([&multiLine](const AttrName& name, const ScalarAttributeArray& array) {
#if ENABLE_USD_DEBUGGING
            std::cout << toTfToken(name.name).GetText() << " " << toString(name.suffix) << " ";
#else
            std::cout << name.name.token << " ";
#endif
            std::cout << array.size() << "bytes ";
            if (multiLine)
                std::cout << "\n";
        });
        bucketImpl.arrayAttributeArrays.forEach([&multiLine](const AttrName& name, const ArrayAttributeArray& array) {
#if ENABLE_USD_DEBUGGING
            std::cout << toTfToken(name.name).GetText() << " " << toString(name.suffix) << " ";
#else
            std::cout << name.name.token << " ";
#endif
            std::cout << array.values.size() << "bytes ";
            if (multiLine)
                std::cout << "\n";
        });
    }
}

// Debug print: one-line summary of a bucket (id, prim count, USD prim type if
// present, attribute names). Continues on the next chunk line.
inline void PathToAttributesMap::printBucketName(const Bucket& bucketTypes, BucketId bucketId) const
{
    const BucketImpl* bucketImplPtr = buckets.find(bucketId);
    if (!bucketImplPtr)
        return;
    const
    // (continuation of printBucketName)
    BucketImpl& bucketImpl = *bucketImplPtr;
    std::cout << "Id: " << size_t(bucketId) << " ";
    size_t bucketPrimCount = bucketImpl.elemToPath.size();
    std::cout << "PrimCount: " << bucketPrimCount << " ";

    // Find USD prim type
    for (auto attrNameAndType : bucketTypes)
    {
        Type type(attrNameAndType.type);
        if (type.role == AttributeRole::ePrimTypeName)
        {
            Token nameToken(attrNameAndType.name);
            std::cout << "PrimType: " << nameToken.getText() << " ";
        }
    }

    std::cout << "AttributeNames: ";
    for (auto attrNameAndType : bucketTypes)
    {
        Token nameToken(attrNameAndType.name);
        std::cout << nameToken.getText() << toString(attrNameAndType.suffix) << " ";
    }
    std::cout << "\n";
}

// Debug print: one summary line per bucket (see printBucketName).
inline void PathToAttributesMap::printBucketNames() const
{
    std::cout << "Buckets:\n";
    for (auto& bucketIdAndBucket : attrNameSetToBucketId)
    {
        const Bucket& bucketTypes = bucketIdAndBucket.first;
        BucketId bucketId = bucketIdAndBucket.second;
        std::cout << "    ";
        printBucketName(bucketTypes, bucketId);
    }
}

// Debug print: every bucket's full (type, name, suffix, TypeC) signature.
inline void PathToAttributesMap::printBucketNamesAndTypes() const
{
    std::cout << "Buckets:\n";
    for (auto& bucketIdAndBucket : attrNameSetToBucketId)
    {
        std::cout << "    ";
        const Bucket& bucketTypes = bucketIdAndBucket.first;
        for (AttrNameAndType attrNameAndType : bucketTypes)
        {
            Type type(attrNameAndType.type);
            Token nameToken(attrNameAndType.name);
            std::cout << "(" << type << " " << nameToken.getText() << " " << attrNameAndType.suffix << " "
                      << "TypeC(" << attrNameAndType.type << ") ";
        }
        std::cout << "\n";
    }
}

// Copy the GPU mirror of a scalar attribute array between buckets.
// NOTE(review): only the GPU side is handled here when src.gpuValid; the CPU
// side appears to be copied elsewhere (MirroredArray::operator=) — confirm.
inline void PathToAttributesMap::bucketImplCopyScalarAttributeArray(ScalarAttributeArray &dest, const ScalarAttributeArray &src)
{
    CARB_ASSERT(dest.type == src.type);
    if (src.gpuValid)
    {
        dest.resizeGpu(platform.gpuCuda, platform.gpuCudaCtx, src.size(), dest.typeinfo.size);
        platform.gpuCuda->memcpyAsync(*platform.gpuCudaCtx, dest.gpuArray, src.gpuArray, src.gpuCapacity, omni::gpucompute::MemcpyKind::deviceToDevice);
        dest.gpuValid = true;
        dest.gpuCapacity = src.gpuCapacity;
        dest.gpuAllocedWithCuda = src.gpuAllocedWithCuda;
    }
}

// Deep-copy an array-of-arrays attribute between buckets: resets dest CPU
// capacities, allocates per-element storage via getArraySpanC, then copies the
// CPU mirror and/or the GPU mirror depending on which side of src is valid.
inline void PathToAttributesMap::bucketImplCopyArrayAttributeArray(BucketImpl& destBucketImpl, const AttrName& destName, ArrayAttributeArray &dest, const ArrayAttributeArray &src)
{
    CARB_ASSERT(dest.values.type == src.values.type);

    const Typeinfo &typeInfo = dest.values.typeinfo;
    const size_t arrayElemSize = typeInfo.arrayElemSize;

    MirroredArray *const destSizeArray = &dest.elemCounts;
    MirroredArray *const destCpuCapacityArray = &dest.cpuElemCounts;

    const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(dest);

    // TODO: Figure out how to remove this fixup step in a more "clean" way
    // Need to set capacity to zero, because capacity will
    // have been erroneously copied from source in
    // MirroredArray copy constructor
    ArrayAndDirtyIndices arrayAndchangedIndices = getArraySpanC(*destCpuCapacityArray, destName, destAOA, destBucketImpl, CpuWriteConfig());
    setArrayDirty(arrayAndchangedIndices);
    SpanC destCapacitySpan = arrayAndchangedIndices.array;
    size_t* destCapacities = reinterpret_cast<size_t*>(destCapacitySpan.ptr);
    for (size_t elemIndex = 0; elemIndex != destCapacitySpan.elementCount; elemIndex++)
    {
        destCapacities[elemIndex] = 0;
    }

    // getArrayWrC to allocate data for arrays
    ArrayAndDirtyIndices destSpan = getArraySpanC(dest.values, destName, destAOA, destBucketImpl, CpuWriteConfig());
    setArrayDirty(destSpan);

    const size_t* elemCounts = reinterpret_cast<const size_t*>(destSizeArray->cpuData());

    if (src.values.cpuValid)
    {
        // TODO: Isn't this redundant with the call to getArraySpanC above with cpu write access?
        enableCpuWrite(dest.values, elemCounts, destCpuCapacityArray, nullptr, nullptr);

        // Per-element memcpy of each inner array.
        uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destSpan.array.ptr);
        uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(src.values.cpuData());
        for (size_t elemIndex = 0; elemIndex != destSpan.array.elementCount; elemIndex++)
        {
            uint8_t* destPtr = destPtrs[elemIndex];
            const uint8_t* srcPtr = srcPtrs[elemIndex];
            size_t sizeBytes = elemCounts[elemIndex] * arrayElemSize;
            memcpy(destPtr, srcPtr, sizeBytes);
        }
    }
    else
    {
        dest.values.cpuValid = false;
    }
    if (src.values.gpuValid)
    {
        MirroredArray *const destGpuElemCountArray = &dest.gpuElemCounts;
        MirroredArray *const destGpuPtrArray = &dest.gpuPtrs;
        enableGpuWrite(dest.values, elemCounts, destCpuCapacityArray, destGpuElemCountArray, destGpuPtrArray);

        const MirroredArray *const srcGpuPtrArray = &src.gpuPtrs;

        // Select which API to use
        omni::gpucompute::GpuCompute* computeAPI = nullptr;
        omni::gpucompute::Context* computeCtx = nullptr;
        if (src.values.gpuAllocedWithCuda)
        {
            computeAPI = platform.gpuCuda;
            computeCtx = platform.gpuCudaCtx;
        }

        if (computeAPI)
        {
            // Per-element device-to-device copy of each inner array.
            uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destGpuPtrArray->cpuData());
            uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(srcGpuPtrArray->cpuData());
            for (size_t elemIndex = 0; elemIndex != destSpan.array.elementCount; elemIndex++)
            {
                uint8_t* destPtr = destPtrs[elemIndex];
                const uint8_t* srcPtr = srcPtrs[elemIndex];
                size_t sizeBytes = elemCounts[elemIndex] * arrayElemSize;
                computeAPI->memcpyAsync(*computeCtx, destPtr, srcPtr, sizeBytes, omni::gpucompute::MemcpyKind::deviceToDevice);
            }
            destGpuPtrArray->gpuAllocedWithCuda = src.values.gpuAllocedWithCuda;
        }
    }
}

// Copy all attribute arrays (scalar and array-of-array) from one bucket impl
// to another. An empty attrFilter means "copy everything"; otherwise only the
// (type, name, suffix) entries present in the filter are copied. Only
// NameSuffix::none entries are handled.
inline void PathToAttributesMap::bucketImplCopyArrays(BucketImpl& destBucketImpl, BucketId destBucketId, const BucketImpl& srcBucketImpl, BucketId srcBucketId, const carb::flatcache::set<AttrNameAndType_v2>& attrFilter)
{
    destBucketImpl.scalarAttributeArrays.forEach([this, &srcBucketImpl, &attrFilter](const AttrName& destName, ScalarAttributeArray& dest) {
        AttrNameAndType_v2 destNameV2( carb::flatcache::Type(dest.type), destName.name, destName.suffix);
        const bool attrIsInFilter = attrFilter.size() == 0 || (attrFilter.find(destNameV2) != attrFilter.end());
        if (attrIsInFilter && destName.suffix == NameSuffix::none)
        {
            const ScalarAttributeArray* src;
            VALIDATE_TRUE(srcBucketImpl.scalarAttributeArrays.find(destName, &src));
            CARB_ASSERT(src);
            bucketImplCopyScalarAttributeArray(dest, *src);
        }
    });
    destBucketImpl.arrayAttributeArrays.forEach([this, &destBucketImpl, &srcBucketImpl, &attrFilter](const AttrName& destName, ArrayAttributeArray& dest) {
        AttrNameAndType_v2 destNameV2( carb::flatcache::Type(dest.values.type), destName.name, destName.suffix);
        const bool attrIsInFilter = attrFilter.size() == 0 || (attrFilter.find(destNameV2) != attrFilter.end());
        if (attrIsInFilter && destName.suffix == NameSuffix::none)
        {
            const ArrayAttributeArray* src;
            VALIDATE_TRUE(srcBucketImpl.arrayAttributeArrays.find(destName, &src));
            CARB_ASSERT(src);
            bucketImplCopyArrayAttributeArray(destBucketImpl, destName, dest, *src);
        }
    });
}

// Invoke callback(name, valuesArray) for every attribute's value MirroredArray
// in this bucket. For array-of-array attributes only the values array is
// visited; the four bookkeeping arrays are intentionally skipped.
template<typename CallbackT>
void inline PathToAttributesMap::BucketImpl::forEachValueArray(CallbackT callback)
{
    scalarAttributeArrays.forEach([&callback](const AttrName& name, ScalarAttributeArray& array) {
        callback(name, array);
    });
    arrayAttributeArrays.forEach([&callback](const AttrName& name, ArrayAttributeArray& array) {
        callback(name, array.values);

        static_assert(sizeof(PathToAttributesMap::ArrayAttributeArray) == 5 * sizeof(PathToAttributesMap::MirroredArray), "ArrayAttributeArray has unexpected size");
        // Intentionally skips these
        // callback(name, array.elemCounts);
        // callback(name, array.cpuElemCounts);
        // callback(name, array.gpuElemCounts);
        // callback(name, array.gpuPtrs);
    });
}

// Begin serializing into [_buf, _end); a null _buf counts bytes without writing.
inline void PathToAttributesMap::Serializer::init(uint8_t *const _buf, uint8_t *const _end)
{
    p = buf = _buf;
    end = _end;
    bytesWritten = 0;
    overflowed = false;
}

inline bool
// Append `size` bytes. Always advances bytesWritten (so a null/small buffer can
// be used to measure the required size); only copies when the bytes fit.
// Returns false and sets `overflowed` when they don't.
PathToAttributesMap::Serializer::writeBytes(const uint8_t *const src, uint64_t size)
{
    CARB_ASSERT(src);
    bytesWritten += size;
    if (p != nullptr && p + size <= end)
    {
        memcpy(p, src, size);
        p += size;
        return true;
    }
    overflowed = true;
    return false;
}

// Write a length-prefixed (size_t) character buffer.
inline bool PathToAttributesMap::Serializer::writeString(const char* const s, const size_t len)
{
    bool OK = true;
    if (!write<size_t>(len))
    {
        OK = false;
    }
    if (!writeBytes(reinterpret_cast<const uint8_t*>(s), len))
    {
        OK = false;
    }
    return OK;
}

// Write a length-prefixed (size_t) std::string.
inline bool PathToAttributesMap::Serializer::writeString(const std::string &s)
{
    bool OK = true;
    if (!write<size_t>(s.length()))
    {
        OK = false;
    }
    if (!writeBytes(reinterpret_cast<const uint8_t*>(s.data()), s.length()))
    {
        OK = false;
    }
    return OK;
}

// Write one POD value by raw byte copy.
template<typename T>
bool PathToAttributesMap::Serializer::write(const T &t)
{
    static_assert(std::is_pod<T>::value, "T must be POD");
    return writeBytes(reinterpret_cast<const uint8_t*>(&t), sizeof(T));
}

// Begin deserializing from [_buf, _end).
inline void PathToAttributesMap::Deserializer::init(const uint8_t *const _buf, const uint8_t *const _end)
{
    p = buf = _buf;
    end = _end;
    bytesRead = 0;
    overflowed = false;
}

// Read `size` bytes into dst; returns false and sets `overflowed` (leaving dst
// unwritten) when the buffer is exhausted.
inline bool PathToAttributesMap::Deserializer::readBytes(uint8_t *const dst, uint64_t size)
{
    CARB_ASSERT(dst);
    bytesRead += size;
    if (p + size <= end)
    {
        memcpy(dst, p, size);
        p += size;
        return true;
    }
    overflowed = true;
    return false;
};

// Read a length-prefixed string written by writeString.
// NOTE(review): the return value of read<size_t>() is ignored; on a truncated
// buffer `len` is left uninitialized before resize() — confirm callers guard
// against malformed/short input.
inline bool PathToAttributesMap::Deserializer::readString(std::string &s)
{
    size_t len;
    read<size_t>(len);
    s.resize(len);
    return readBytes(reinterpret_cast<uint8_t*>(&s[0]), len);
}

// Read one POD value by raw byte copy.
template<typename T>
bool PathToAttributesMap::Deserializer::read(T &t)
{
    static_assert(std::is_pod<T>::value, "T must be POD");
    return readBytes(reinterpret_cast<uint8_t*>(&t), sizeof(T));
}

// Serialize a MirroredArray's metadata: name string, suffix, TypeC, the
// cpu/usd validity flags, and the element count. (The payload is written by
// the per-kind serialize functions below.)
inline void PathToAttributesMap::serializeMirroredArrayMetadata(const AttrName& srcName, MirroredArray &srcValuesArray, Serializer &out)
{
    out.writeString(toTfToken(srcName.name).GetString());
    out.write<NameSuffix>(srcName.suffix);
    // TfToken are actually pointers, so we need to serialize the encoded TypeC
    out.write<TypeC>(srcValuesArray.type);
    out.write<bool>(srcValuesArray.cpuValid);
    out.write<bool>(srcValuesArray.usdValid);
    out.write<size_t>(srcValuesArray.count);
}

pxr::TfType typeCtoTfType(TypeC typeC);

// Deserialize metadata written by serializeMirroredArrayMetadata, allocate the
// attribute entry in arraysMap, placement-new the array object, and size its
// CPU buffer. Outputs the attribute name, its Typeinfo, and the new array.
template<typename ArraysT, typename ArraysMapT>
inline void PathToAttributesMap::deserializeMirroredArrayMetadata(Platform& platform, ArraysMapT& arraysMap, AttrName &destName, Typeinfo *&typeInfo, ArraysT *&destArray, Deserializer &in)
{
    std::string nameStr;
    in.readString(nameStr);
    in.read<NameSuffix>(destName.suffix);
    destName.name = asInt(pxr::TfToken(nameStr));

    TypeC destType;
    {
        in.read<TypeC>(destType);
    }

    // typeToInfo is deserialized before all mirrored arrays, so the type must exist.
    VALIDATE_TRUE(typeToInfo.find(destType, &typeInfo));

    if (!arraysMap.allocateEntry(destName, &destArray))
    {
        CARB_LOG_ERROR("Failed to insert dest mirrored array");
        return;
    }
    CARB_ASSERT(destArray);
    new (destArray) ArraysT(platform, destType, *typeInfo);

    MirroredArray *const destValuesArray = destArray->getValuesArray();
    destValuesArray->type = destType;
    in.read<bool>(destValuesArray->cpuValid);
    in.read<bool>(destValuesArray->usdValid);
    in.read<size_t>(destValuesArray->count);
    destValuesArray->resize(destValuesArray->count * typeInfo->size);
}

// Serialize one scalar attribute array (metadata, then raw CPU payload when
// the CPU mirror is valid). Returns the number of bytes written.
inline uint64_t PathToAttributesMap::serializeScalarAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out)
{
    const size_t bytesBegin = out.bytesWritten;

    MirroredArray &srcValuesArray = srcScalarAttributeArray;
    serializeMirroredArrayMetadata(srcName, srcValuesArray, out);

    if (srcValuesArray.cpuValid)
    {
        // CpuReadConfig pulls the data to the CPU mirror if needed.
        const ConstSpanC srcSpan = getArraySpanC(srcValuesArray, srcName, ScalarArrayOfArrayInfo(), srcBucketImpl, CpuReadConfig()).array;
        CARB_ASSERT(srcSpan.elementCount == srcValuesArray.count);
        const uint8_t* srcPtr = reinterpret_cast<const uint8_t*>(srcSpan.ptr);
        const Typeinfo &typeInfo = srcValuesArray.typeinfo;
        CARB_ASSERT(!typeInfo.isArray);
        out.writeBytes(srcPtr, srcSpan.elementCount * typeInfo.size);
    }

    return out.bytesWritten - bytesBegin;
}

// Deserialize one scalar attribute array written by serializeScalarAttributeArray.
inline bool PathToAttributesMap::deserializeScalarAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in)
{
    AttrName destName;
    Typeinfo *typeInfo;
    ScalarAttributeArray *destArray;
    deserializeMirroredArrayMetadata(destBucketImpl.platform, destBucketImpl.scalarAttributeArrays, destName, typeInfo, destArray, in);
    CARB_ASSERT(typeInfo);
    CARB_ASSERT(destArray);
    CARB_ASSERT(!typeInfo->isArray);

    if (destArray->cpuValid)
    {
        uint8_t* destPtr = reinterpret_cast<uint8_t*>(destArray->cpuData());
        in.readBytes(destPtr, destArray->count * typeInfo->size);
    }

    return true;
}

// Serialize one array-of-arrays attribute: values metadata, the four
// bookkeeping arrays (flags + counts + payload), then each element's inner
// array payload. Returns the number of bytes written.
inline uint64_t PathToAttributesMap::serializeArrayAttributeArray(BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ArrayAttributeArray& srcArrayAttributeArray, Serializer &out)
{
    const size_t bytesBegin = out.bytesWritten;

    MirroredArray &srcValuesArray = srcArrayAttributeArray.values;
    serializeMirroredArrayMetadata(srcName, srcValuesArray, out);

    // write scalar metadata
    auto writeScalarArrayOfArrayMetadata = [this](BucketImpl& srcBucketImpl, const BucketId& srcBucketId, const AttrName& srcName, ScalarAttributeArray& srcScalarAttributeArray, Serializer &out)
    {
        // similar to serializeScalarAttributeArray, but we can skip some metadata because it should be inferrable
        out.write<bool>(srcScalarAttributeArray.cpuValid);
        out.write<bool>(srcScalarAttributeArray.usdValid);
        out.write<size_t>(srcScalarAttributeArray.count);

        if (srcScalarAttributeArray.cpuValid)
        {
            const ConstSpanC srcSpan = getArraySpanC(srcScalarAttributeArray, srcName, ScalarArrayOfArrayInfo(), srcBucketImpl, CpuReadConfig()).array;
            CARB_ASSERT(srcSpan.elementCount == srcScalarAttributeArray.count);
            const uint8_t* srcPtr = reinterpret_cast<const uint8_t*>(srcSpan.ptr);
            const Typeinfo &typeInfo = srcScalarAttributeArray.typeinfo;
            CARB_ASSERT(!typeInfo.isArray);
            out.writeBytes(srcPtr, srcSpan.elementCount * typeInfo.size);
        }
    };
    static_assert(sizeof(ArrayAttributeArray) == 5 * sizeof(MirroredArray), "ArrayAttributeArray has unexpected size");
    writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.elemCounts, out);
    writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.cpuElemCounts, out);
    // TODO: Can we omit these?
    writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.gpuElemCounts, out);
    writeScalarArrayOfArrayMetadata(srcBucketImpl, srcBucketId, srcName, srcArrayAttributeArray.gpuPtrs, out);

    // write array-of-array values
    if (srcValuesArray.cpuValid)
    {
        const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(srcArrayAttributeArray);
        const ConstSpanC srcSpan = getArraySpanC(srcValuesArray, srcName, destAOA, srcBucketImpl, CpuReadConfig()).array;
        CARB_ASSERT(srcSpan.elementCount == srcValuesArray.count);

        // TODO: Should this be cpuElemCounts instead of elemCounts? The requested capacity may not have been applied yet..
        const size_t* elemCounts = reinterpret_cast<const size_t*>(srcArrayAttributeArray.elemCounts.cpuData());

        const Typeinfo &typeInfo = srcValuesArray.typeinfo;
        CARB_ASSERT(typeInfo.isArray);
        uint8_t* const* srcPtrs = reinterpret_cast<uint8_t* const*>(srcSpan.ptr);
        for (size_t elemIndex = 0; elemIndex != srcSpan.elementCount; elemIndex++)
        {
            const uint8_t* srcPtr = srcPtrs[elemIndex];
            const size_t elemCount = elemCounts[elemIndex];
            out.writeBytes(srcPtr, elemCount * typeInfo.arrayElemSize);
        }
    }

    return out.bytesWritten - bytesBegin;
}

// Deserialize one array-of-arrays attribute written by serializeArrayAttributeArray.
// (Definition continues past this chunk.)
inline bool PathToAttributesMap::deserializeArrayAttributeArray(BucketImpl& destBucketImpl, const BucketId& destBucketId, Deserializer &in)
{
    AttrName destName;
    Typeinfo *typeInfo;
    ArrayAttributeArray *destArray;
    deserializeMirroredArrayMetadata(destBucketImpl.platform, destBucketImpl.arrayAttributeArrays, destName, typeInfo, destArray, in);
    CARB_ASSERT(typeInfo);
    CARB_ASSERT(destArray);
    CARB_ASSERT(typeInfo->isArray);

    // write scalar metadata
    auto readScalarArrayOfArrayMetadata = [this](BucketImpl& destBucketImpl, const BucketId& destBucketId, const AttrName& destName, ScalarAttributeArray& destScalarAttributeArray, Deserializer &in)
    {
        // similar to deserializeScalarAttributeArray, but we can skip some metadata because it should be inferrable
        in.read<bool>(destScalarAttributeArray.cpuValid);
        in.read<bool>(destScalarAttributeArray.usdValid);
        in.read<size_t>(destScalarAttributeArray.count);
        const Typeinfo &typeInfo = destScalarAttributeArray.typeinfo;
        destScalarAttributeArray.resize(typeInfo.size * destScalarAttributeArray.count);

        if (destScalarAttributeArray.cpuValid)
        {
            CARB_ASSERT(destScalarAttributeArray.size() == (getTypeInfo(destScalarAttributeArray.type).size * destScalarAttributeArray.count));
            uint8_t *const destPtr = reinterpret_cast<uint8_t*>(destScalarAttributeArray.cpuData());
            CARB_ASSERT(!typeInfo.isArray);
            in.readBytes(destPtr, destScalarAttributeArray.count * typeInfo.size);
        }
    };
    static_assert(sizeof(ArrayAttributeArray) == 5 *
sizeof(MirroredArray), "ArrayAttributeArray has unexpected size"); readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->elemCounts, in); readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->cpuElemCounts, in); // TODO: Can we omit these? readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->gpuElemCounts, in); readScalarArrayOfArrayMetadata(destBucketImpl, destBucketId, destName, destArray->gpuPtrs, in); // read array-of-array values MirroredArray& destValuesArray = destArray->values; if (destValuesArray.cpuValid) { // Need to set capacity to zero, because capacity will // have been erroneously copied from source const ArrayOfArrayInfo destAOA = getArrayOfArrayInfo(*destArray); const SpanC destCapacitySpan = getArraySpanC(destArray->cpuElemCounts, destName, destAOA, destBucketImpl, CpuReadWriteConfig()).array; size_t *const destCapacities = reinterpret_cast<size_t*>(destCapacitySpan.ptr); for (size_t elemIndex = 0; elemIndex != destCapacitySpan.elementCount; elemIndex++) { destCapacities[elemIndex] = 0; } const size_t* elemCounts = reinterpret_cast<const size_t*>(destArray->elemCounts.cpuData()); // getArrayWrC to allocate data for arrays const SpanC destSpan = getArraySpanC(destValuesArray, destName, destAOA, destBucketImpl, CpuReadWriteConfig()).array; uint8_t** destPtrs = reinterpret_cast<uint8_t**>(destSpan.ptr); for (size_t elemIndex = 0; elemIndex != destValuesArray.count; elemIndex++) { uint8_t* destPtr = destPtrs[elemIndex]; size_t elemCount = elemCounts[elemIndex]; in.readBytes(destPtr, elemCount * typeInfo->arrayElemSize); } destValuesArray.cpuValid = true; } return true; } inline BucketImpl& PathToAttributesMap::addAttributeInternal(BucketImpl& prevBucketImpl, const Bucket& prevBucket, const TokenC& attrName, const TypeC type, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount) { APILOGGER("addAttributeInternal", apiLogEnabled, attrName); // 
newBucket := oldBucket Union { attrName } // findOrCreate (bucketId, bucketImpl) for newBucket, which updates // attrNameSetToBucketId and buckets Bucket nextBucket = prevBucket; nextBucket.insert({ carb::flatcache::Type(type), attrName, NameSuffix::none }); // Early out if attribute already in bucket const bool attributeAlreadyInBucket = (nextBucket.size() == prevBucket.size()); if (attributeAlreadyInBucket) return prevBucketImpl; const std::pair<BucketId, BucketImpl&> nextBucketIdAndImpl = findOrCreateBucket(nextBucket); const BucketId nextBucketId = nextBucketIdAndImpl.first; BucketImpl& nextBucketImpl = nextBucketIdAndImpl.second; const size_t nextBucketOriginalSize = nextBucketImpl.elemToPath.size(); if (nextBucketOriginalSize == 0) { // Move arrays etc. from original bucket nextBucketImpl = std::move(prevBucketImpl); nextBucketImpl.SetBucket(std::move(nextBucket)); // Below are codified assumptions about the side-effects of attempted move-assigning of a BucketImpl. // We assume that move-assigning prevBucketImpl like above will clear it as well. This is important because // prevBucketImpl may still reside as a valid bucket in the PathToAttributesMap::buckets map. // // These asserts live outside of the move-assignment operator definition because, technically, the compiler is // allowed to elect to use a copy-assignment if it needs to. // // TODO: Would this be better expressed as an explicit "clear" of prevBucketImpl? Why wasn't that the original // behavior? CARB_ASSERT(prevBucketImpl.scalarAttributeArrays.empty()); CARB_ASSERT(prevBucketImpl.arrayAttributeArrays.empty()); CARB_ASSERT(prevBucketImpl.elemToPath.empty()); CARB_ASSERT(prevBucketImpl.listenerIdToChanges.empty()); } else { // TODO : there should be a faster way to do this but more to discuss here later auto prevBucketMapIter = attrNameSetToBucketId.find(prevBucket); const BucketId prevBucketId = (prevBucketMapIter != attrNameSetToBucketId.end()) ? 
prevBucketMapIter->second : kInvalidBucketId; for (const auto path : prevBucketImpl.elemToPath) { moveElementBetweenBuckets(asInt(path), nextBucketId, prevBucketId, nextBucket); } } const size_t nextBucketNewSize = nextBucketImpl.elemToPath.size(); CARB_ASSERT(nextBucketNewSize >= nextBucketOriginalSize); CARB_ASSERT(getTypeInfo(type).size == typeinfo.size); CARB_ASSERT(getTypeInfo(type).isArray == typeinfo.isArray); CARB_ASSERT(getTypeInfo(type).arrayElemSize == typeinfo.arrayElemSize); // Add an array for the new attribute const AttrName name{ attrName, NameSuffix::none }; ArrayAttributeArray *arrayAttributeArray; MirroredArray* valuesArray; if (typeinfo.isArray) { const bool inserted = nextBucketImpl.arrayAttributeArrays.allocateEntry(std::move(name), &arrayAttributeArray); valuesArray = &arrayAttributeArray->values; if (inserted) { new (arrayAttributeArray) ArrayAttributeArray(nextBucketImpl.platform, type, typeinfo); while (valuesArray->count < nextBucketNewSize) { allocElement(*arrayAttributeArray); } } } else { const bool inserted = nextBucketImpl.scalarAttributeArrays.allocateEntry(std::move(name), &valuesArray); arrayAttributeArray = nullptr; if (inserted) { new (valuesArray) ScalarAttributeArray(nextBucketImpl.platform, type, typeinfo); while (valuesArray->count < nextBucketNewSize) { allocElement(*valuesArray); } } arrayAttributeArray = nullptr; } CARB_ASSERT(valuesArray); CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray); CARB_ASSERT(!typeinfo.isArray || getTypeInfo(valuesArray->type).isArray); #if CARB_ASSERT_ENABLED const size_t elemCount = getElementCount(nextBucketImpl.GetBucket()); CARB_ASSERT(valuesArray->count == elemCount); CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->cpuElemCounts.count == elemCount); CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->elemCounts.count == elemCount); CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuElemCounts.count == elemCount); CARB_ASSERT(!arrayAttributeArray || 
arrayAttributeArray->gpuPtrs.count == elemCount); #endif // #if CARB_ASSERT_ENABLED // fixup elem/path maps for (pxr::SdfPath& path : nextBucketImpl.elemToPath) { std::pair<BucketId, ArrayIndex>* bucketAndElemIndex; if (pathToBucketElem.find(asInt(path), &bucketAndElemIndex)) { bucketAndElemIndex->first = nextBucketId; } } // If default value specified, copy it to every element if (value) { fillAttributeInternal(nextBucketImpl, name, nextBucketOriginalSize, nextBucketNewSize, value, typeinfo, arrayElemCount, valuesArray, arrayAttributeArray); } return nextBucketImpl; } inline void PathToAttributesMap::fillAttributeInternal(BucketImpl& bucketImpl, const AttrName& name, const size_t startIndex, const size_t endIndex, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount, MirroredArray *const valuesArray, ArrayAttributeArray *const arrayAttributeArray) { CARB_ASSERT(valuesArray); CARB_ASSERT(value); CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray); CARB_ASSERT(startIndex < valuesArray->count); CARB_ASSERT(endIndex <= valuesArray->count); if (typeinfo.isArray) { CARB_ASSERT(arrayAttributeArray); const ArrayOfArrayInfo aoa = getArrayOfArrayInfo(*arrayAttributeArray); // Fill array sizes { ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(arrayAttributeArray->elemCounts, name, aoa, bucketImpl, CpuWriteConfig()); CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount); CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount); for (size_t i = startIndex; i < endIndex; ++i) { reinterpret_cast<size_t*>(arrayAndDirtyIndices.array.ptr)[i] = arrayElemCount; setArrayElementDirty(arrayAndDirtyIndices, i); } } // Fill array values { ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*valuesArray, name, aoa, bucketImpl, CpuWriteConfig()); CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount); CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount); for (size_t i = startIndex; i < endIndex; ++i) { 
uint8_t** dest = reinterpret_cast<uint8_t**>(arrayAndDirtyIndices.array.ptr) + arrayAndDirtyIndices.array.elementSize * i; CARB_ASSERT(*dest); memcpy(*dest, value, arrayElemCount * typeinfo.arrayElemSize); // assumes coherent and packed array value provided setArrayElementDirty(arrayAndDirtyIndices, i); } } } else { const ArrayOfArrayInfo aoa = ScalarArrayOfArrayInfo(); ArrayAndDirtyIndices arrayAndDirtyIndices = getArraySpanC(*valuesArray, name, aoa, bucketImpl, CpuWriteConfig()); CARB_ASSERT(startIndex < arrayAndDirtyIndices.array.elementCount); CARB_ASSERT(endIndex <= arrayAndDirtyIndices.array.elementCount); for (size_t i = startIndex; i < endIndex; ++i) { uint8_t* dest = arrayAndDirtyIndices.array.ptr + arrayAndDirtyIndices.array.elementSize * i; memcpy(dest, value, typeinfo.size); setArrayElementDirty(arrayAndDirtyIndices, i); } } } inline void PathToAttributesMap::addAttributeInternal(const PathC& path, const TokenC& attrNameC, const NameSuffix nameSuffix, const TypeC ctype, const void* value, const Typeinfo& typeinfo, const size_t arrayElemCount) { APILOGGER("addAttributeInternal", apiLogEnabled, path, attrNameC); BucketId bucketId; ArrayIndex elemIndex; std::tie(bucketId, elemIndex) = addAttributeGetBucketAndArrayIndex(path, attrNameC, nameSuffix, ctype); BucketImpl *const bucketImpl = buckets.find(bucketId); CARB_ASSERT(bucketImpl); ArrayAttributeArray *arrayAttributeArray; MirroredArray* valuesArray; const AttrName attrName{ attrNameC, nameSuffix }; if (typeinfo.isArray) { bucketImpl->arrayAttributeArrays.find(attrName, &arrayAttributeArray); CARB_ASSERT(arrayAttributeArray); valuesArray = &arrayAttributeArray->values; } else { bucketImpl->scalarAttributeArrays.find(attrName, &valuesArray); arrayAttributeArray = nullptr; } CARB_ASSERT(valuesArray); CARB_ASSERT(!typeinfo.isArray || arrayAttributeArray); if (value) { fillAttributeInternal(*bucketImpl, attrName, elemIndex, elemIndex + 1, value, typeinfo, arrayElemCount, valuesArray, arrayAttributeArray); } 
#if CARB_ASSERT_ENABLED const size_t elemCount = bucketImpl->elemToPath.size(); CARB_ASSERT(valuesArray->count == elemCount); CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->cpuElemCounts.count == elemCount); CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->elemCounts.count == elemCount); CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuElemCounts.count == elemCount); CARB_ASSERT(!arrayAttributeArray || arrayAttributeArray->gpuPtrs.count == elemCount); #endif // #if CARB_ASSERT_ENABLED } inline PathToAttributesMap::PathToAttributesMap(const PlatformId& platformId) : platform(carb::getCachedInterface<carb::flatcache::IPlatform>()->getMutable(platformId)) , pathToBucketElem(0, std::hash<PathId>(), std::equal_to<PathId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator }) , buckets(platform) , attrNameSetToBucketId() , listenerIdToChangeTrackerConfig(0, ListenerIdHasher(), std::equal_to<ListenerId>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator }) , typeToInfo(0, std::hash<TypeC>(), std::equal_to<TypeC>(), AllocFunctor{ &platform.allocator }, FreeFunctor{ &platform.allocator }) , usdStageId() , minimalPopulationDone(false) { // required types for arrays of arrays Typeinfo* typeinfo; typeToInfo.allocateEntry(PTAM_SIZE_TYPEC, &typeinfo); *typeinfo = Typeinfo{ sizeof(PTAM_SIZE_TYPE), false, 0 }; typeToInfo.allocateEntry(PTAM_POINTER_TYPEC, &typeinfo); *typeinfo = Typeinfo{ sizeof(PTAM_POINTER_TYPE), false, 0 }; } inline PathToAttributesMap& PathToAttributesMap::operator=(const flatcache::PathToAttributesMap& other) { carb::profiler::ZoneId zoneId = CARB_PROFILE_BEGIN(1, "Clear buckets"); buckets.clear(); CARB_PROFILE_END(1, zoneId); zoneId = CARB_PROFILE_BEGIN(1, "Copy pathToBucketElem"); pathToBucketElem.clear(); pathToBucketElem.reserve(other.pathToBucketElem.size()); other.pathToBucketElem.forEach([this](const PathId& key, const std::pair<BucketId, ArrayIndex> &otherValue) { 
std::pair<BucketId, ArrayIndex>* value; VALIDATE_TRUE(pathToBucketElem.allocateEntry(key, &value)); static_assert(std::is_copy_constructible<std::pair<BucketId, ArrayIndex>>::value, "Expected pathToBucketElem values to be copy-constructible"); new (value) std::pair<BucketId, ArrayIndex>(otherValue); }); CARB_PROFILE_END(1, zoneId); zoneId = CARB_PROFILE_BEGIN(1, "Copy scalar attributes"); buckets = other.buckets; CARB_PROFILE_END(1, zoneId); attrNameSetToBucketId = other.attrNameSetToBucketId; typeToInfo = other.typeToInfo; usdStageId = other.usdStageId; minimalPopulationDone = other.minimalPopulationDone; stageHierarchy = other.stageHierarchy; zoneId = CARB_PROFILE_BEGIN(1, "Copy array attributes"); { BucketId id{ 0 }; for (size_t i = 0; i < buckets.end(); ++i, ++id) { auto bucketPtr = buckets.find(id); if (bucketPtr) { const BucketImpl& srcBucketImpl = *(other.buckets.find(id)); BucketImpl& destBucketImpl = *bucketPtr; // Copy any array-valued attributes bucketImplCopyArrays(destBucketImpl, id, srcBucketImpl, id); } } } CARB_PROFILE_END(1, zoneId); return *this; } inline PathToAttributesMap::~PathToAttributesMap() { } } } #include <carb/flatcache/GetArrayGPU.h> // Enable the warnings we disabled when we included USD headers #if defined(__GNUC__) # pragma GCC diagnostic pop # ifdef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS # define __DEPRECATED # undef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS # endif #endif
omniverse-code/kit/fabric/include/carb/flatcache/HashMap.h
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <cstdint>
#include <cstdlib>

#include <carb/Defines.h>
#include <carb/flatcache/Defines.h>
#include <carb/flatcache/Intrinsics.h>

namespace carb
{
namespace flatcache
{

// Default allocation policy: plain malloc.
struct HashMapDefaultAlloc
{
    inline void* operator()(const size_t bytes)
    {
        return std::malloc(bytes);
    }
};

// Default deallocation policy: plain free.
struct HashMapDefaultFree
{
    inline void operator()(void *const ptr)
    {
        std::free(ptr);
    }
};

// A hashmap implemented with the following decisions:
//
// * Memory is allocated in a single contiguous buffer so that find operations make linear cache line fetches as much
//   as possible. This intends to make more easily predictable memory access patterns, and thus, easier hardware-level
//   prefetch decisions. Similarly, whole-map iteration benefits from the same cache-friendly access patterns.
//
// * Find operations intentionally are coded without range checks on the main loop. This is a tradeoff of speed for
//   less error-detection in release builds. To help mitigate this, debug builds do track probe counts to validate
//   we don't exceed the possible length of the hashmap.
//
// * No opinion codified on thread synchronization. It can be used safely if done carefully, but this is not a
//   guarantee of the implementation.
//
// * No iterators provided. If some batch operation must occur, use the forEach() function provided, which should
//   suffice. The forEach() method should provide similar performance without the added risk of callers being able to
//   arbitrarily cache iterators outside the control of the HashMap, its routines, or even its lifetime.
//
// * Deletes copy-constructor and copy-assignment for non-standard-layout mappings. This forces callers to implement
//   these routines explicitly, favoring the clarity of reading intent in explicit implementation over ambiguity over
//   compiler selection. Mappings that have standard-layout default to use a memcpy to copy data as fast as possible.
//
// * Implements allocateEntry() method, rather than insert()/emplace() methods mimicking std::unordered_map API. This
//   does the minimum steps necessary to reserve address space for a key-value mapping, and provides the caller with
//   the reserved buffer memory address for them to do their own construction, assignment, or initialization routines.
//   This favors slightly more explicit coding patterns at the caller to force clarity of intent. In particular,
//   it make more obvious the choice of the caller between construction vs assignment, and copy vs move semantics. It
//   also offers greater flexibility without sacrificing performance.
//
// * ~HashMap() and clear() operate different depending on if KeyT and ValueT are known to be
//   std::is_trivially_destructible. If they are, the fastest option is chosen: to deallocate the memory without
//   iteration or explicitly destruction per-entry. Otherwise, the implementation iterates to non-trivially destruct
//   each object in-place.
template<typename KeyT,
         typename ValueT,
         typename HashT = std::hash<KeyT>,
         typename KeyEqualsT = std::equal_to<KeyT>,
         typename AllocT = HashMapDefaultAlloc,
         typename FreeT = HashMapDefaultFree>
struct HashMap
{
    // I didn't experiment with this exhaustively, could be tuned better, probably
    static constexpr size_t LOAD_FACTOR_NUMERATOR = 3ull;
    // NOTE(review): "DENOMENATOR" is a misspelling of DENOMINATOR, but it is a public
    // identifier — renaming would break users, so it is kept as-is.
    static constexpr size_t LOAD_FACTOR_DENOMENATOR = 4ull;
    static constexpr size_t MIN_INIT_CAPACITY = 4ull;
    static_assert((MIN_INIT_CAPACITY & (MIN_INIT_CAPACITY - 1ull)) == 0, "MIN_INIT_CAPACITY must be a power of two!");
    static constexpr bool KEY_IS_TRIVIALLY_DESTRUCTIBLE = std::is_trivially_destructible<KeyT>::value;
    static constexpr bool VALUE_IS_TRIVIALLY_DESTRUCTIBLE = std::is_trivially_destructible<ValueT>::value;

    // Open-addressing slot states. DELETED (tombstone) keeps probe chains intact
    // after an erase.
    enum EntryState : uint8_t
    {
        HASH_MAP_ENTRY_STATE_FREE,
        HASH_MAP_ENTRY_STATE_OCCUPIED,
        HASH_MAP_ENTRY_STATE_DELETED,
    };

    // One slot: state tag followed by the key/value payload (constructed in place
    // only while OCCUPIED).
    struct EntryT
    {
        EntryState state;
        KeyT key;
        ValueT value;
    };

    // Size in bytes of the slot buffer for a given capacity.
    static constexpr size_t allocationSize( const size_t capacity );
    // Max entry count before a resize is triggered (capacity * 3/4).
    static constexpr size_t loadThreshold( const size_t capacity );
    // Inverse mapping of loadThreshold (entry count -> required capacity, pre-rounding).
    static constexpr size_t inverseLoadThreshold( const size_t capacity );
    // Smallest power-of-two capacity whose load threshold exceeds `capacity` entries.
    static constexpr size_t capacityAdjustedForLoadThreshold( const size_t capacity );

    HashMap( const size_t capacity = 0,
             const HashT &hasher = HashT(),
             const KeyEqualsT &keyEquals = KeyEqualsT(),
             const AllocT &alloc_ = AllocT(),
             const FreeT &free_ = FreeT() );
    ~HashMap();
    HashMap(const HashMap& other);
    HashMap& operator=(const HashMap& other);
    HashMap(HashMap&& other) noexcept;
    HashMap& operator=(HashMap&& other) noexcept;

    inline friend void swap(HashMap& a, HashMap& b) noexcept
    {
        using std::swap;
        swap(a.m_hasher, b.m_hasher);
        swap(a.m_keyEquals, b.m_keyEquals);
        swap(a.m_alloc, b.m_alloc);
        swap(a.m_free, b.m_free);
        swap(a.m_size, b.m_size);
        swap(a.m_capacity, b.m_capacity);
        swap(a.m_loadThreshold, b.m_loadThreshold);
        swap(a.m_mask, b.m_mask);
        swap(a.m_entries, b.m_entries);
    }

    void clear();
    const void* data() const;
    bool empty() const;
    size_t size() const;
    // NOTE(review): "capacty" is a misspelling of "capacity", but it is part of the
    // public API — kept as-is for source compatibility.
    size_t capacty() const;
    void reserve(const size_t capacity);
    bool find( const KeyT& key, ValueT** outValue );
    bool find( const KeyT& key, const ValueT** outValue ) const;
    bool exists( const KeyT& key ) const;
    // Returns true if a new slot was claimed (caller must construct key already moved/copied
    // in and construct/assign the value); false if the key already existed (outValue points
    // at the existing value).
    bool allocateEntry( KeyT&& key, ValueT** outValue );
    bool allocateEntry( const KeyT& key, ValueT** outValue );
    // Intended to be safe to call during forEach() as it does not invalidate iteration.
    bool freeEntry( const KeyT& key );
    void freeEntryByKeyAddress( const KeyT *const key );
    void freeEntryByValueAddress( const ValueT *const value );
    template<typename CallbackT>
    inline void forEach( CallbackT callback );
    template<typename CallbackT>
    inline void forEach( CallbackT callback ) const;
    size_t totalCollisionLength() const;

private:
    size_t hashInternal( const KeyT& key ) const;
    void resizeIfNecessary();
    void resize( const size_t nextCapacity );
    void freeEntryInternal( EntryT *const entry );
    bool findFirstAvailable( const KeyT& key, EntryT** outEntry );
    bool findExisting( const KeyT& key, EntryT** outEntry );
    bool findExisting( const KeyT& key, const EntryT** outEntry ) const;

    HashT m_hasher;
    KeyEqualsT m_keyEquals;
    AllocT m_alloc;
    FreeT m_free;
    size_t m_size;          // occupied entry count
    size_t m_capacity;      // slot count, always 0 or a power of two
    size_t m_loadThreshold; // cached loadThreshold(m_capacity)
    size_t m_mask;          // m_capacity - 1, for cheap modulo
    EntryT* m_entries;      // contiguous slot buffer, nullptr when unallocated
};

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocationSize( const size_t capacity )
{
    return capacity * sizeof( EntryT );
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::loadThreshold( const size_t capacity )
{
    return (capacity * LOAD_FACTOR_NUMERATOR / LOAD_FACTOR_DENOMENATOR);
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::inverseLoadThreshold( const size_t capacity )
{
    return (capacity * LOAD_FACTOR_DENOMENATOR / LOAD_FACTOR_NUMERATOR);
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline constexpr size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::capacityAdjustedForLoadThreshold( const size_t capacity )
{
    // reserves capacity to the nearest power of two that satisfies the load threshold for the requested capacity
    size_t adjustedCapacity;
    if (capacity && capacity >= loadThreshold(MIN_INIT_CAPACITY))
    {
        // +1 because we want capacity < loadThreshold(adjustedCapacity), not capacity <= loadThreshold(adjustedCapacity)
        adjustedCapacity = 1ull << ( 64u - clz64( inverseLoadThreshold( capacity + 1 ) - 1ull ) );
    }
    else
    {
        adjustedCapacity = MIN_INIT_CAPACITY;
    }
    CARB_ASSERT(capacity < loadThreshold(adjustedCapacity));
    CARB_ASSERT((adjustedCapacity & (adjustedCapacity - 1ull)) == 0);
    return adjustedCapacity;
}

// Construct with an optional initial entry capacity. capacity == 0 defers all
// allocation until the first insert (resizeIfNecessary handles the lazy case).
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(const size_t capacity,
                                                                        const HashT &hasher,
                                                                        const KeyEqualsT &keyEquals,
                                                                        const AllocT &alloc_,
                                                                        const FreeT &free_)
{
    m_hasher = hasher;
    m_keyEquals = keyEquals;
    m_alloc = alloc_;
    m_free = free_;
    m_size = 0;
    if (capacity)
    {
        const size_t adjustedCapacity = capacityAdjustedForLoadThreshold(capacity);
        const size_t bufSize = allocationSize(adjustedCapacity);
        m_capacity = adjustedCapacity;
        m_loadThreshold = loadThreshold(adjustedCapacity);
        m_mask = adjustedCapacity - 1ull;
        m_entries = (EntryT*)m_alloc(bufSize);
        // zero-fill => every slot starts HASH_MAP_ENTRY_STATE_FREE
        memset(m_entries, 0, bufSize);
    }
    else
    {
        m_capacity = 0;
        m_loadThreshold = 0;
        m_mask = 0;
        m_entries = nullptr;
    }
}

// Destructor: explicitly destroys occupied keys/values only when their types are
// non-trivially destructible; otherwise just frees the buffer. The `visited`
// counter lets the loop stop early once all live entries have been destroyed.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::~HashMap()
{
    if ( m_entries )
    {
        if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE || !VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
        {
            size_t index = 0;
            size_t visited = 0;
            for ( ; index < m_capacity && visited < m_size; ++index)
            {
                EntryT *const entry = &m_entries[index];
                if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
                {
                    if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
                    {
                        entry->key.~KeyT();
                    }
                    if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
                    {
                        entry->value.~ValueT();
                    }
                    CARB_ASSERT(visited < m_size);
                    ++visited;
                }
            }
        }
        m_free(m_entries);
        m_entries = nullptr;
    }
}

// Copy constructor: bulk memcpy of the slot buffer; only legal for trivially
// copyable entries (enforced by the static_assert).
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(const HashMap& other)
    : m_hasher(other.m_hasher)
    , m_keyEquals(other.m_keyEquals)
    , m_alloc(other.m_alloc)
    , m_free(other.m_free)
    , m_size(other.m_size)
    , m_capacity(other.m_capacity)
    , m_loadThreshold(other.m_loadThreshold)
    , m_mask(other.m_mask)
{
    static_assert(std::is_trivially_copyable<EntryT>::value, "Copying of HashMap is only supported for key-value mappings that are use standard-layout classes.");
    const size_t bufSize = allocationSize(m_capacity);
    m_entries = (EntryT*)m_alloc(bufSize);
    memcpy(m_entries, other.m_entries, bufSize);
    CARB_ASSERT(m_entries);
    CARB_ASSERT(m_capacity);
    CARB_ASSERT((m_capacity & (m_capacity - 1ull)) == 0); // assert m_capacity is power of two
    CARB_ASSERT(m_size < m_capacity);
    CARB_ASSERT(m_size < m_loadThreshold);
}

// Copy assignment via copy-and-swap (strong exception-safety pattern).
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>& HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::operator=(const HashMap& other)
{
    HashMap tmp(other);
    swap(*this, tmp);
    return *this;
}

// Move constructor: steals the buffer; the moved-from map keeps its stale capacity
// fields but a null m_entries (resizeIfNecessary re-allocates lazily if reused).
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::HashMap(HashMap&& other) noexcept
    : m_hasher(std::move(other.m_hasher))
    , m_keyEquals(std::move(other.m_keyEquals))
    , m_alloc(std::move(other.m_alloc))
    , m_free(std::move(other.m_free))
    , m_size(std::move(other.m_size))
    , m_capacity(std::move(other.m_capacity))
    , m_loadThreshold(std::move(other.m_loadThreshold))
    , m_mask(std::move(other.m_mask))
    , m_entries(std::move(other.m_entries))
{
    other.m_entries = nullptr;
    other.clear();
}

// Move assignment via move-and-swap.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>& HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::operator=(HashMap&& other) noexcept
{
    HashMap tmp(std::move(other));
    swap(*this, tmp);
    return *this;
}

// Remove all entries but keep the allocation. Destroys non-trivial keys/values
// in place; for trivial types a single memset resets every slot to FREE.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::clear()
{
    if ( m_entries )
    {
        if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE || !VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
        {
            size_t index = 0;
            size_t visited = 0;
            for ( ; index < m_capacity && visited < m_size; ++index)
            {
                EntryT *const entry = &m_entries[index];
                if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
                {
                    if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
                    {
                        entry->key.~KeyT();
                    }
                    if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
                    {
                        entry->value.~ValueT();
                    }
                    CARB_ASSERT(visited < m_size);
                    ++visited;
                }
                entry->state = HASH_MAP_ENTRY_STATE_FREE;
            }
        }
        else
        {
            static_assert(HASH_MAP_ENTRY_STATE_FREE == 0, "memset(0) requires HASH_MAP_ENTRY_STATE_FREE == 0");
            memset(m_entries, 0, allocationSize(m_capacity));
        }
    }
    m_size = 0;
}

// Raw read-only access to the slot buffer (for diagnostics/serialization).
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline const void* HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::data() const
{
    return m_entries;
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::empty() const
{
    return m_size == 0;
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::size() const
{
    return m_size;
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::capacty() const
{
    return m_capacity;
}

// Grow (never shrink) so that `capacity` entries fit under the load threshold.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::reserve(const size_t capacity)
{
    const size_t adjustedCapacity = capacityAdjustedForLoadThreshold(capacity);
    if (m_capacity < adjustedCapacity)
    {
        resize(adjustedCapacity);
    }
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::find( const KeyT& key, ValueT** outValue )
{
    EntryT* existing;
    if (findExisting( key, &existing) )
    {
        *outValue = &existing->value;
        return true;
    }
    return false;
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::find( const KeyT& key, const ValueT** outValue ) const
{
    const EntryT* existing;
    if (findExisting( key, &existing) )
    {
        *outValue = &existing->value;
        return true;
    }
    return false;
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::exists( const KeyT& key ) const
{
    const EntryT* existing;
    if (findExisting( key, &existing) )
    {
        return true;
    }
    return false;
}

// Reserve a slot for `key` (move form). On true, the key has been move-constructed
// into the slot and *outValue points at UNINITIALIZED value storage the caller must
// construct. On false, the key already existed and *outValue is the live value.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocateEntry( KeyT&& key, ValueT** outValue )
{
    EntryT* availableEntry;
    resizeIfNecessary();
    const bool available = findFirstAvailable(key, &availableEntry);
    CARB_ASSERT(availableEntry);
    if (available)
    {
        new (&availableEntry->key) KeyT(std::move(key));
        CARB_ASSERT(availableEntry->state != HASH_MAP_ENTRY_STATE_OCCUPIED);
        availableEntry->state = HASH_MAP_ENTRY_STATE_OCCUPIED;
        *outValue = &availableEntry->value;
        CARB_ASSERT(m_size < m_capacity);
        CARB_ASSERT(m_size + 1 > m_size);
        ++m_size;
        return true;
    }
    *outValue = &availableEntry->value;
    return false;
}

// Reserve a slot for `key` (copy form). Same contract as the move overload.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::allocateEntry( const KeyT& key, ValueT** outValue )
{
    EntryT* availableEntry;
    resizeIfNecessary();
    const bool available = findFirstAvailable(key, &availableEntry);
    CARB_ASSERT(availableEntry);
    if (available)
    {
        new (&availableEntry->key) KeyT(key);
        CARB_ASSERT(availableEntry->state != HASH_MAP_ENTRY_STATE_OCCUPIED);
        availableEntry->state = HASH_MAP_ENTRY_STATE_OCCUPIED;
        *outValue = &availableEntry->value;
        CARB_ASSERT(m_size < m_capacity);
        CARB_ASSERT(m_size + 1 > m_size);
        ++m_size;
        return true;
    }
    *outValue = &availableEntry->value;
    return false;
}

// Erase by key. Returns false if the key was not present.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntry( const KeyT& key )
{
    EntryT* existing;
    if (findExisting(key, &existing))
    {
        freeEntryInternal(existing);
        return true;
    }
    else
    {
        return false;
    }
}

// Erase given a pointer to the key stored inside an entry: recovers the enclosing
// EntryT by subtracting the member offset (hence the non-polymorphic requirement).
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryByKeyAddress( const KeyT *const key )
{
    static_assert(!std::is_polymorphic<EntryT>::value, "Unable to freeEntry by key address!");
    constexpr size_t OFFSET = offsetof(EntryT, key);
    EntryT *const entry = (EntryT*)(((uintptr_t)key) - OFFSET);
    freeEntryInternal(entry);
}

// Erase given a pointer to the value stored inside an entry (same offset trick).
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryByValueAddress(const ValueT *const value)
{
    static_assert(!std::is_polymorphic<EntryT>::value, "Unable to freeEntry by value address!");
    constexpr size_t OFFSET = offsetof(EntryT, value);
    EntryT *const entry = (EntryT*)(((uintptr_t)value) - OFFSET);
    freeEntryInternal(entry);
}

template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::hashInternal( const KeyT& key ) const
{
    size_t hash = m_hasher(key);
#define HASHMAP_DEFENSIVE_SALT IN_USE
#if USING( HASHMAP_DEFENSIVE_SALT )
    // Apply a defensive salt to the user-calculated hash value. It is unsafe to assume user-provided hashes are good.
    //
    // Kit historically had a problem where std::hash<PathC> caused terrible distributions inside of space-restricted
    // hashmaps. This was primarily because the hash values returned had zero entropy in the lower 8 bits. The higher
    // bits had excellent entropy, though. It is trivial to improve std::hash<PathC> by doing (oldHashValue >> 8).
    // In other words, tossing the bits with zero entropy. This will produce perfectly unique hash value output for
    // every PathC input. However, using this directly in a hash map is still not ideal because, while the hash function
    // has a guarantee on uniqueness, it does not necessarily lend to good distributions in a hash table. Two hash
    // values that are multiples of each other will naturally collide in any space-restricted hashmap.
    // (Which, realistically, is all real hash maps since hardware memory is not infinite.) Applying a little salt on
    // top of the hash value fixes this distribution problem.
    //
    // This also provides general safety against poorly implemented user-provided hash functions that don't generate
    // unique or well distributed values.
    //
    // Known problematic data sets:
    //  - PathC (interned SdfPaths)
    //  - TokenC (interned TfTokens)
    //
    // Salt techniques tried:
    //  - H3_XX64 (xxhash):
    //    - good distribution
    //    - too slow
    //  - H3_XX64 (xxhash) with custom seeds:
    //    - no seed performed better than the xxhash default secret
    //  - Custom H3_XX64 implementation specialized for aligned 64-bit keys:
    //    - mathematically identical distribution to H3_XX64
    //    - 2x faster performance than official implementation
    //  - Multiply by a prime
    //    - best distribution so far
    //    - best speed so far (3x faster than custom H3_XX64)
    //
    // TODO: A fun intern experiment would be to investigate our various omniverse hash functions for distribution and
    // speed. And also investigate alternative defensive salting techniques.
    return hash * 48271ull;
#else // #if USING( HASHMAP_DEFENSIVE_SALT )
    return hash;
#endif // #if USING( HASHMAP_DEFENSIVE_SALT )
#undef HASHMAP_DEFENSIVE_SALT
}

// Grow when the load threshold is hit; also performs the deferred first allocation
// for maps constructed with capacity 0 or left buffer-less after a move.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::resizeIfNecessary()
{
    if (m_size >= m_loadThreshold)
    {
        CARB_ASSERT(!m_capacity || m_capacity * 2 > m_capacity);
        resize(m_capacity ? m_capacity * 2 : MIN_INIT_CAPACITY);
    }
    else if (!m_entries)
    {
        const size_t bufSize = allocationSize(m_capacity);
        m_entries = (EntryT*)m_alloc(bufSize);
        memset(m_entries, 0, bufSize);
    }
    CARB_ASSERT(m_entries);
    CARB_ASSERT(m_capacity);
    CARB_ASSERT((m_capacity & (m_capacity - 1)) == 0);
    CARB_ASSERT(m_size < m_capacity);
    CARB_ASSERT(m_size < m_loadThreshold);
}

// Rehash into a fresh map of `nextCapacity` slots (must be a power of two), moving
// keys/values across, then adopt the new buffers via swap. Tombstones are dropped.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::resize(const size_t nextCapacity)
{
    CARB_ASSERT(m_size < loadThreshold(nextCapacity));
    CARB_ASSERT((nextCapacity & (nextCapacity - 1)) == 0);
    HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT> tmp(nextCapacity, m_hasher, m_keyEquals, m_alloc, m_free );
    size_t index = 0;
    size_t visited = 0;
    for ( ; index < m_capacity && visited < m_size; ++index)
    {
        EntryT *const entry = &m_entries[index];
        if ( entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED )
        {
            ValueT *tmpV;
            tmp.allocateEntry(std::move(entry->key), &tmpV);
            new (tmpV) ValueT(std::move(entry->value));
            CARB_ASSERT(visited < m_size);
            ++visited;
        }
    }
    CARB_ASSERT(m_size == tmp.m_size);
    using std::swap;
    swap(m_entries, tmp.m_entries);
    swap(m_size, tmp.m_size);
    swap(m_capacity, tmp.m_capacity);
    swap(m_loadThreshold, tmp.m_loadThreshold);
    swap(m_mask, tmp.m_mask);
}

// Destroy an occupied entry in place and mark it DELETED (tombstone) so probe
// chains through this slot remain intact.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT>
inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::freeEntryInternal( EntryT *const entry )
{
    CARB_ASSERT(entry);
    CARB_ASSERT(entry->state == HASH_MAP_ENTRY_STATE_OCCUPIED);
    if (!KEY_IS_TRIVIALLY_DESTRUCTIBLE)
    {
        entry->key.~KeyT();
    }
    if (!VALUE_IS_TRIVIALLY_DESTRUCTIBLE)
    {
        entry->value.~ValueT();
    }
    entry->state = HASH_MAP_ENTRY_STATE_DELETED;
    CARB_ASSERT(m_size);
    CARB_ASSERT(m_size - 1 < m_size);
    --m_size;
}

// NOTE: chunk ends mid-declaration — findFirstAvailable / findExisting definitions
// continue beyond this chunk.
template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT,
typename FreeT> template<typename CallbackT> inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::forEach( CallbackT callback ) { size_t index = 0; size_t visited = 0; const size_t size_captured = m_size; for ( ; index < m_capacity && visited < size_captured; ++index) { if (m_entries[index].state == HASH_MAP_ENTRY_STATE_OCCUPIED) { callback(m_entries[index].key, m_entries[index].value); CARB_ASSERT(visited < size_captured); ++visited; } } } template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT> template<typename CallbackT> inline void HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::forEach(CallbackT callback) const { size_t index = 0; size_t visited = 0; const size_t size_captured = m_size; for (; index < m_capacity && visited < size_captured; ++index) { if (m_entries[index].state == HASH_MAP_ENTRY_STATE_OCCUPIED) { callback(m_entries[index].key, m_entries[index].value); CARB_ASSERT(visited < size_captured); ++visited; } } } template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT> inline size_t HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::totalCollisionLength() const { size_t len = 0; if ( m_entries ) { size_t index = 0; size_t visited = 0; const size_t size_captured = m_size; for (; index < m_capacity && visited < size_captured; ++index) { const EntryT *const probe = &m_entries[index]; if (probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED) { const EntryT *const natural = &m_entries[hashInternal(probe->key) & m_mask]; len += (size_t)((natural <= probe) ? 
(probe - natural) : ( ( probe + m_capacity ) - natural) ); CARB_ASSERT(visited < size_captured); ++visited; } } } return len; } template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT> inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findFirstAvailable( const KeyT& key, EntryT** outEntry ) { EntryT* probe; size_t probeIdx; // This will technically resize opportunistically if the key might already exist, but at least // that edge case will only occur once per resize, and being opportunistic avoids searching first. resizeIfNecessary(); #if USING( ASSERTS ) size_t probes = 0; #endif // #if USING( ASSERTS ) probeIdx = hashInternal(key) & m_mask; CARB_ASSERT(m_size < m_capacity); // otherwise we infinite loop while(1) { CARB_ASSERT( probeIdx < m_capacity ); probe = &m_entries[probeIdx]; if ( probe->state == HASH_MAP_ENTRY_STATE_FREE ) { *outEntry = probe; return true; } else if ( probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED ) { if ( m_keyEquals(probe->key, key) ) { *outEntry = probe; return false; } } else if ( probe->state == HASH_MAP_ENTRY_STATE_DELETED ) { *outEntry = probe; return true; } probeIdx = ( probeIdx + 1 ) & m_mask; #if USING( ASSERTS ) ++probes; CARB_ASSERT(probes < m_capacity); #endif // #if USING( ASSERTS ) } CARB_ASSERT(false && "unreachable code"); return false; } template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT> inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findExisting( const KeyT& key, EntryT** outEntry ) { if (!m_size) { return false; } EntryT* probe; size_t probeIdx; #if USING( ASSERTS ) size_t probes = 0; #endif // #if USING( ASSERTS ) probeIdx = hashInternal(key) & m_mask; CARB_ASSERT(m_size < m_capacity); // otherwise we infinite loop while(1) { CARB_ASSERT( probeIdx < m_capacity ); probe = &m_entries[probeIdx]; if ( probe->state == HASH_MAP_ENTRY_STATE_FREE ) { return false; } else if ( 
probe->state == HASH_MAP_ENTRY_STATE_OCCUPIED ) { if ( m_keyEquals(probe->key, key) ) { *outEntry = probe; return true; } } else { // skip CARB_ASSERT( probe->state == HASH_MAP_ENTRY_STATE_DELETED ); } probeIdx = ( probeIdx + 1 ) & m_mask; #if USING( ASSERTS ) ++probes; CARB_ASSERT(probes < m_capacity); #endif // #if USING( ASSERTS ) } CARB_ASSERT(false && "unreachable code"); return false; } template<typename KeyT, typename ValueT, typename HashT, typename KeyEqualsT, typename AllocT, typename FreeT> inline bool HashMap<KeyT, ValueT, HashT, KeyEqualsT, AllocT, FreeT>::findExisting( const KeyT& key, const EntryT** outEntry ) const { return const_cast<HashMap*>(this)->findExisting( key, const_cast< EntryT** >(outEntry) ); } } // namespace flatcache } // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/Enums.h
// Copyright (c) 2021-2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once namespace carb { namespace flatcache { /** * @enum PtrToPtrKind * * @details When getting an array-valued attribute for GPU access, you can * optionally use this enum to ask for a GPU pointer to the GPU data * pointer (eGpuPtrToGpuPtr), or a CPU pointer to the GPU data * pointer (eCpuPtrToGpuPtr). * The advantage of using eCpuPtrToGpuPtr is that you can dereference * the returned pointer on the CPU, and pass the GPU data pointer as * a CUDA kernel parameter. * The advantage of using eGpuPtrToGpuPtr is that it makes it easier * to extend kernels to operate on arrays of arrays later. Also it * allows us to support allocation and resizing of array-valued * attributes on the GPU in the future. * * PtrToPtrKind is not a parameter of methods returning arrays of * arrays, for example getArrayGPU(). This is because there is no way * to pass a variable length array of GPU pointers to a kernel using * its CPU launch parameters. So GPU arrays of arrays always have to * be passed to kernels as a GPU pointer to an array of GPU pointers. */ enum class PtrToPtrKind { eNotApplicable = 0, eGpuPtrToGpuPtr = 0, // eGpuPtrToGpuPtr == eNotApplicable for backward compatibility eCpuPtrToGpuPtr = 1 }; } // namespace flatcache } // namespace carb
omniverse-code/kit/fabric/include/carb/flatcache/IdTypes.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <cstdint> #include <stddef.h> namespace carb { namespace flatcache { struct StageInProgressId { uint64_t id; }; struct StageAtTimeIntervalId { uint64_t id; }; struct StageWithHistoryId { uint64_t id; }; struct PrimBucketListId { uint64_t id; }; struct ListenerId { size_t id; bool operator==(ListenerId other) const { return id == other.id; } }; struct ListenerIdHasher { size_t operator()(const ListenerId& key) const { return key.id; } }; enum class PlatformId : uint8_t { Global = 0, // add additional platforms here Count, }; } }
omniverse-code/kit/fabric/include/carb/flatcache/OGUtilsNotForGeneralUser.h
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/Interface.h>

namespace carb
{
namespace flatcache
{

//! Carbonite ABI interface exposing internal flatcache/USD synchronization utilities.
//! As the name says: not intended for general users.
struct OGUtilsNotForGeneralUser
{
    CARB_PLUGIN_INTERFACE("carb::flatcache::OGUtilsNotForGeneralUser", 0, 2);

    //////////////////////////////////////////////////////////////////////////
    // A simple collection of utility functions for interacting with flatcache
    //////////////////////////////////////////////////////////////////////////

    /** @brief Import attributes from a usd prim, in provided cache, at a given path,
     *         read at a given time
     *
     * @param[in,out] cache  The cache to be populated
     * @param[in]     dstPath The path location in the cache to import attributes to
     * @param[in]     prim   The prim from which to read the attributes in USD
     * @param[in]     time   The time at which to read the attributes from
     * @param[in]     filter A subset of attributes to consider during this import process.
     *                       Will import all attributes if left empty
     * @param[in]     force  Whether to overwrite values, or just add missing ones
     */
    void(CARB_ABI* importPrimAttributesToCacheAtPathAtTime)(struct PathToAttributesMap& cache,
                                                            const pxr::SdfPath& dstPath,
                                                            const pxr::UsdPrim& prim,
                                                            const pxr::UsdTimeCode& time,
                                                            const std::set<TokenC>& filter,
                                                            bool force);

    /** @brief Copy a subset of data from the cache back to USD
     *
     * @param[in,out] cache   The cache to read the data from
     * @param[in]     bucket  A collection of bucket subsets, from which the data needs to be copied back to USD
     * @param[in]     skipMeshPoints Whether to skip writing mesh point data back to USD
     */
    void (CARB_ABI* pushDataToUSD)(struct PathToAttributesMap& cache, struct BucketSubset const& bucket, bool skipMeshPoints);

    // Prefetch the whole USD stage to the cache
    // Typically you only call this at stage load time, because the USD notify
    // handler updates the cache if the stage changes.
    void(CARB_ABI* usdToCache)(PathToAttributesMap& cache, bool processConnections);
};

}
}
omniverse-code/kit/fabric/include/omni/gpucompute/D3dContext.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/graphics/Graphics.h> using namespace carb::graphics; namespace omni { namespace gpucompute { // TODO: move out of public API struct ContextD3D { std::unique_ptr<Device, GfxResult (*)(Device*)> device; CommandQueue* commandQueue; // Not unique_ptr because we don't own it std::unique_ptr<CommandAllocator, void (*)(CommandAllocator*)> commandAllocator; std::unique_ptr<CommandList, void (*)(CommandList*)> commandList; std::unique_ptr<Fence, void (*)(Fence*)> fence; ContextD3D(DeviceDesc deviceDesc, carb::graphics::Graphics* graphics) : device(graphics->createDevice(deviceDesc), graphics->destroyDevice), commandQueue(graphics->getCommandQueue(device.get(), CommandQueueType::eRender, 0)), commandAllocator(graphics->createCommandAllocator(commandQueue), graphics->destroyCommandAllocator), commandList(graphics->createCommandList(commandAllocator.get()), graphics->destroyCommandList), fence(graphics->createFence(device.get(), carb::graphics::FenceDesc { kFenceCreateFlagNone, "GPU compute context fence" }), graphics->destroyFence) { } }; } }
omniverse-code/kit/fabric/include/omni/gpucompute/GpuCompute.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/Types.h>

#include <map>
#include <memory>
#include <vector>

using namespace carb;

// Forward declarations so this header does not depend on the full graphics headers.
namespace carb
{
namespace graphics
{
struct Context;
struct Graphics;
struct Device;
struct CommandList;
struct CommandQueue;
struct Fence;
struct Shader;
}
}

namespace omni
{
namespace gpucompute
{

//! Direction of a memcpy, mirroring the CUDA cudaMemcpyKind values.
enum class MemcpyKind
{
    hostToHost = 0,
    hostToDevice = 1,
    deviceToHost = 2,
    deviceToDevice = 3
};

struct Context; //!< Opaque compute context.
struct Shader; //!< Opaque compiled shader.

//! Describes one shader parameter by name.
struct Parameter
{
    const char* name;
    bool isConstantBuffer;
};

// API agnostic representation of
//     static_cast<char*>(buffer) + byteOffset
// which points to data of size elemSize
struct GpuPointer
{
    void* buffer;
    size_t byteOffset;
    size_t elemSize;
};

//! A CPU-side buffer: raw data pointer plus element count.
struct CpuBuffer
{
    void* data;
    size_t count;
};

//! Argument bundle passed to dispatch(); carries either GPU or CPU arguments.
struct Args
{
    // TODO: move CPU backend into its own plugin
    // GPU
    std::vector<GpuPointer> gpuBuffers;
    std::vector<size_t> gpuArgToBufferCount;
    // CPU
    std::vector<CpuBuffer> cpuArgs;
};

// A CUDA-style interface for D3D, consisting of GPU malloc, free, memcpy and
// kernel dispatch
struct GpuCompute
{
    CARB_PLUGIN_INTERFACE("omni::gpucompute::GpuCompute", 0, 1)

    // CPU memory allocation
    void(CARB_ABI* hostAlloc)(Context& context, void** ptr, size_t byteCount);
    void(CARB_ABI* freeHost)(Context& context, void* ptr);

    // GPU memory allocation
    void*(CARB_ABI* malloc)(Context& context, size_t byteCount, size_t elemSize);
    void(CARB_ABI* free)(Context& context, void* ptr);

    // GPU async memory allocation (uses stream 0 for CUDA)
    void*(CARB_ABI* mallocAsync)(Context& context, size_t byteCount, size_t elemSize);
    void(CARB_ABI* freeAsync)(Context& context, void* ptr);

    // Synchronous copy between host/device memory; direction is given by `kind`.
    void(CARB_ABI* memcpy)(Context& context, void* dst, const void* src, size_t byteCount, MemcpyKind kind);

    // Launches `shader` over `gridDim` with the supplied arguments.
    void(CARB_ABI* dispatch)(Context& context, Shader& shader, Args& args, carb::Uint3 gridDim);

    // Context lifetime management. createContextD3dVk wraps externally-owned graphics objects.
    Context&(CARB_ABI* createContext)();
    Context&(CARB_ABI* createContextD3dVk)(carb::graphics::Graphics* graphics,
                                           carb::graphics::Device* device,
                                           carb::graphics::CommandList* commandList,
                                           carb::graphics::CommandQueue* commandQueue,
                                           carb::graphics::Fence* fence);
    void(CARB_ABI* destroyContext)(Context& context);

    // Error queries, CUDA-style: peek leaves the error state, get clears it.
    uint32_t(CARB_ABI* peekAtLastError)(Context& context);
    uint32_t(CARB_ABI* getLastError)(Context& context);

    // Asynchronous copy variant of memcpy.
    void(CARB_ABI* memcpyAsync)(Context& context, void* dst, const void* src, size_t byteCount, MemcpyKind kind);
};

//! Compilation target for ComputeCompiler.
enum class Target
{
    CPU,
    GPU
};

//! Carbonite ABI interface for compiling compute shaders and querying their parameters.
struct ComputeCompiler
{
    CARB_PLUGIN_INTERFACE("omni::gpucompute::ComputeCompiler", 0, 1)

    omni::gpucompute::Shader*(CARB_ABI* compile)(carb::graphics::Device* device, Target target, const char* codeString);
    void(CARB_ABI* getParameters)(Parameter*& parameters, size_t& count, Shader& shader);
    void(CARB_ABI* destroyShader)(Shader& shader);
};

}
}
omniverse-code/kit/include/carb/IObject.h
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Implementation of Carbonite objects. #pragma once #include "Interface.h" #include <cstdint> namespace carb { /** * Reference-counted object base. */ class IObject { public: CARB_PLUGIN_INTERFACE("carb::IObject", 1, 0) /** * Destructor. */ virtual ~IObject() = default; /** * Atomically add one to the reference count. * @returns The current reference count after one was added, though this value may change before read if other * threads are also modifying the reference count. The return value is guaranteed to be non-zero. */ virtual size_t addRef() = 0; /** * Atomically subtracts one from the reference count. If the result is zero, carb::deleteHandler() is called for * `this`. * @returns The current reference count after one was subtracted. If zero is returned, carb::deleteHandler() was * called for `this`. */ virtual size_t release() = 0; }; /** * Smart pointer type for ref counting `IObject`. It automatically controls reference count for the underlying * `IObject` pointer. */ template <class T> class ObjectPtr { public: //////////// Ctors/dtor //////////// /** * Policy directing how the smart pointer is initialized from from raw pointer. */ enum class InitPolicy { eBorrow, ///< Increases reference count. eSteal ///< Assign the pointer without increasing the reference count. }; /** * Default Constructor */ ObjectPtr() : m_object(nullptr) { } /** * Nullptr Constructor */ ObjectPtr(std::nullptr_t) : m_object(nullptr) { } /** * Constructor * @param object The raw pointer to an object. 
If not `nullptr`, it will be "borrowed"; that is, the reference count * will be increased as long as `*this` contains it. */ explicit ObjectPtr(T* object) : m_object(object) { if (m_object) { m_object->addRef(); } } /** * Constructor. * @param object The raw pointer to an object. * @param policy Directive on whether the reference count should be increased or not. */ ObjectPtr(T* object, InitPolicy policy) : m_object(object) { if (policy == InitPolicy::eBorrow && m_object != nullptr) { m_object->addRef(); } } /** * Copy constructor. Always increases the reference count. * @param other The smart pointer from which to copy a reference. */ ObjectPtr(const ObjectPtr<T>& other) : ObjectPtr(other.m_object, InitPolicy::eBorrow) { } /// @copydoc ObjectPtr(const ObjectPtr<T>& other) template <class U> ObjectPtr(const ObjectPtr<U>& other) : ObjectPtr(other.m_object, InitPolicy::eBorrow) { } /** * Move constructor. Steals the reference count from @p other and leaves it empty. * @param other The smart pointer from which to steal a reference. */ ObjectPtr(ObjectPtr<T>&& other) : m_object(other.m_object) { other.m_object = nullptr; } /// @copydoc ObjectPtr(ObjectPtr<T>&& other) template <class U> ObjectPtr(ObjectPtr<U>&& other) : m_object(other.m_object) { other.m_object = nullptr; } /** * Destructor. */ ~ObjectPtr() { _release(); } //////////// Helpers //////////// //////////// Ptr //////////// /** * Converts the smart pointer to a raw pointer. * @returns The raw pointer referenced by the smart pointer. May be `nullptr`. */ T* get() const { return m_object; } /** * Pointer dereference operator. * @returns The raw pointer referenced by the smart pointer. */ T* operator->() const { CARB_ASSERT(m_object); return m_object; } /** * Dereference operator. * @returns A reference to the pointed-at object. */ T& operator*() const { CARB_ASSERT(m_object); return *m_object; } /** * Boolean conversion operator. 
* @returns `true` if the smart pointer is not empty; `false` if the smart pointer is empty. */ explicit operator bool() const { return get() != nullptr; } //////////// Explicit access //////////// /** * Returns the address of the internal reference. * @returns The address of the internal reference. */ T* const* getAddressOf() const { return &m_object; } /// @copydoc getAddressOf() const T** getAddressOf() { return &m_object; } /** * Helper function to release any current reference and return the address of the internal reference pointer. * @returns The address of the internal reference. */ T** releaseAndGetAddressOf() { _release(); return &m_object; } /** * Resets this smart pointer to `nullptr` and returns the previously reference object @a without releasing the held * reference. * @returns The previously referenced object. */ T* detach() { T* temp = m_object; m_object = nullptr; return temp; } /** * Releases the reference on any held object and instead @a steals the given object. * @param other The object to steal a reference to. */ void attach(T* other) { _release(); m_object = other; } //////////// Assignment operator //////////// /** * Assignment to @a nullptr. Releases any previously held reference. * @returns @a *this */ ObjectPtr& operator=(decltype(nullptr)) { _release(); return *this; } /** * Releases any previously held reference and copies a reference to @p other. * @param other The object to reference. 
* @returns @a *this */ ObjectPtr& operator=(T* other) { ObjectPtr(other).swap(*this); return *this; } /// @copydoc operator= template <typename U> ObjectPtr& operator=(U* other) { ObjectPtr(other).swap(*this); return *this; } /// @copydoc operator= ObjectPtr& operator=(const ObjectPtr& other) { ObjectPtr(other).swap(*this); return *this; } /// @copydoc operator= template <class U> ObjectPtr& operator=(const ObjectPtr<U>& other) { ObjectPtr(other).swap(*this); return *this; } /** * Releases any previously held reference and steals the reference from @p other. * @param other The reference to steal. Will be swapped with @a *this. * @returns @a *this */ ObjectPtr& operator=(ObjectPtr&& other) { other.swap(*this); return *this; } /// @copydoc operator=(ObjectPtr&& other) template <class U> ObjectPtr& operator=(ObjectPtr<U>&& other) { ObjectPtr(std::move(other)).swap(*this); return *this; } /** * Compares equality of this object and another one of the same type. * * @param[in] other The other object to compare this one to. * @returns `true` if the two objects identify the same underlying object. Returns * `false` otherwise. */ template <class U> bool operator==(const ObjectPtr<U>& other) const { return get() == other.get(); } /** * Compares inequality of this object and another one of the same type. * * @param[in] other The other object to compare this one to. * @returns `true` if the two objects do not identify the same underlying object. Returns * `false` otherwise. */ template <class U> bool operator!=(const ObjectPtr<U>& other) const { return get() != other.get(); } /** * Swaps with another smart pointer. * @param other The smart pointer to swap with. 
*/ void swap(ObjectPtr& other) { std::swap(m_object, other.m_object); } private: void _release() { if (T* old = std::exchange(m_object, nullptr)) { old->release(); } } T* m_object; }; /** * Helper function to create carb::ObjectPtr from a carb::IObject pointer by "stealing" the pointer; that is, without * increasing the reference count. * @param other The raw pointer to steal. * @returns A smart pointer referencing @p other. */ template <class T> inline ObjectPtr<T> stealObject(T* other) { return ObjectPtr<T>(other, ObjectPtr<T>::InitPolicy::eSteal); } /** * Helper function to create carb::ObjectPtr from a carb::IObject pointer by "borrowing" the pointer; that is, by * increasing the reference count. * @param other The raw pointer to reference. * @returns A smart pointer referencing @p other. */ template <class T> inline ObjectPtr<T> borrowObject(T* other) { return ObjectPtr<T>(other, ObjectPtr<T>::InitPolicy::eBorrow); } } // namespace carb
omniverse-code/kit/include/carb/SdkVersion.h
// Copyright (c) 2023-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // NOTE: This file is generated by 'make_version.lua' and should not be modified directly. // //! @file //! @brief Defines a macro containing the SDK version this header was built with. #pragma once //! Version string for this SDK build. This string is also returned by carbGetSdkVersion(). //! This value can be passed to @ref CARB_IS_SAME_SDK_VERSION() to verify that the loaded //! version of the Carbonite framework library matches the headers that are in use. #define CARB_SDK_VERSION "158.5+release158.tc9626.54324001"
omniverse-code/kit/include/carb/PluginInitializers.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Utilities to ease the creation of Carbonite plugins. #pragma once #include "Defines.h" namespace carb { #ifndef DOXYGEN_BUILD namespace detail { inline bool& initialized() noexcept { static bool init = false; return init; } } // namespace detail #endif struct Framework; namespace logging { void registerLoggingForClient() noexcept; void deregisterLoggingForClient() noexcept; } // namespace logging namespace profiler { void registerProfilerForClient() noexcept; void deregisterProfilerForClient() noexcept; } // namespace profiler namespace assert { void registerAssertForClient() noexcept; void deregisterAssertForClient() noexcept; } // namespace assert namespace l10n { void registerLocalizationForClient() noexcept; void deregisterLocalizationForClient() noexcept; } // namespace l10n /** * Function called automatically at plugin startup to initialize utilities within each plugin. */ inline void pluginInitialize() { if (detail::initialized()) return; carb::detail::initialized() = true; logging::registerLoggingForClient(); profiler::registerProfilerForClient(); assert::registerAssertForClient(); l10n::registerLocalizationForClient(); } /** * Function called automatically at plugin shutdown to de-initialize utilities within each plugin. */ inline void pluginDeinitialize() { if (!detail::initialized()) return; carb::detail::initialized() = false; assert::deregisterAssertForClient(); profiler::deregisterProfilerForClient(); logging::deregisterLoggingForClient(); l10n::deregisterLocalizationForClient(); } } // namespace carb
omniverse-code/kit/include/carb/Defines.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Carbonite basic defines and helper functions. #pragma once #include <cassert> #include <cinttypes> #include <climits> #include <cstdarg> #include <cstdint> #include <cstdio> #include <cstdlib> #include <csignal> #ifndef CARB_NO_MALLOC_FREE # include <cstring> #else # include <cstddef> // for size_t #endif #include <new> #include <exception> // for std::terminate #include <type_traits> #include <mutex> /** A macro to put into `#else` branches when writing platform-specific code. */ #define CARB_UNSUPPORTED_PLATFORM() static_assert(false, "Unsupported platform!") /** A macro to put into the `#else` branches when writing CPU architecture specific code. */ #define CARB_UNSUPPORTED_ARCHITECTURE() static_assert(false, "Unsupported architecture!") #ifndef CARB_DEBUG # if defined(NDEBUG) || defined(DOXYGEN_BUILD) //! A macro indicating whether the current compilation unit is built in debug mode. Always defined as either 0 or 1. Can //! be overridden by defining before this file is included or by passing on the compiler command line. Defined as `0` //! if `NDEBUG` is defined; `1` otherwise. # define CARB_DEBUG 0 # else # define CARB_DEBUG 1 # endif #endif //! A macro that can be used to indicate classes and members that participate in visualizers, such as \a carb.natvis. //! This is a reminder that these classes, members and types will require visualizer fixup if changes are made. #define CARB_VIZ #ifdef DOXYGEN_BUILD //! A macro defined as `1` if compilation is targeting Windows; `0` otherwise. 
Exactly one of the `CARB_PLATFORM_*` //! macros will be set to `1`. May be overridden by defining before this file is included or by passing on the compiler //! command line. By default, set to `1` if `_WIN32` is defined. # define CARB_PLATFORM_WINDOWS 0 //! A macro defined as `1` if compilation is targeting Linux; `0` otherwise. Exactly one of the `CARB_PLATFORM_*` //! macros will be set to `1`. May be overridden by defining before this file is included or by passing on the compiler //! command line. By default, set to `1` if `_WIN32` is not defined and `__linux__` is defined. # define CARB_PLATFORM_LINUX 1 //! A macro defined as `1` if compilation is targeting Mac OS; `0` otherwise. Exactly one of the `CARB_PLATFORM_*` //! macros will be set to `1`. May be overridden by defining before this file is included or by passing on the compiler //! command line. By default, set to `1` if `_WIN32` and `__linux__` are not defined and `__APPLE__` is defined. # define CARB_PLATFORM_MACOS 0 //! The name of the current platform as a string. # define CARB_PLATFORM_NAME #elif defined(CARB_PLATFORM_WINDOWS) && defined(CARB_PLATFORM_LINUX) && defined(CARB_PLATFORM_MACOS) # if (!!CARB_PLATFORM_WINDOWS) + (!!CARB_PLATFORM_LINUX) + (!!CARB_PLATFORM_MACOS) != 1 # define CARB_PLATFORM_WINDOWS // show previous definition # define CARB_PLATFORM_LINUX // show previous definition # define CARB_PLATFORM_MACOS // show previous definition # error Exactly one of CARB_PLATFORM_WINDOWS, CARB_PLATFORM_LINUX or CARB_PLATFORM_MACOS must be non-zero. 
# endif #elif !defined(CARB_PLATFORM_WINDOWS) && !defined(CARB_PLATFORM_LINUX) # ifdef _WIN32 # define CARB_PLATFORM_WINDOWS 1 # define CARB_PLATFORM_LINUX 0 # define CARB_PLATFORM_MACOS 0 # define CARB_PLATFORM_NAME "windows" # elif defined(__linux__) # define CARB_PLATFORM_WINDOWS 0 # define CARB_PLATFORM_LINUX 1 # define CARB_PLATFORM_MACOS 0 # define CARB_PLATFORM_NAME "linux" # elif defined(__APPLE__) # define CARB_PLATFORM_WINDOWS 0 # define CARB_PLATFORM_LINUX 0 # define CARB_PLATFORM_MACOS 1 # define CARB_PLATFORM_NAME "macos" # else CARB_UNSUPPORTED_PLATFORM(); # endif #else # error "Must define all of CARB_PLATFORM_WINDOWS, CARB_PLATFORM_LINUX and CARB_PLATFORM_MACOS or none." #endif #if CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS || defined(DOXYGEN_BUILD) # include <unistd.h> // _POSIX_VERSION comes from unistd.h /** This is set to `_POSIX_VERSION` platforms that are mostly-compliant with POSIX. * This is set to 0 on other platforms (e.g. no GNU extensions). */ # define CARB_POSIX _POSIX_VERSION #else # define CARB_POSIX 0 #endif #ifndef DOXYGEN_SHOULD_SKIP_THIS # if CARB_PLATFORM_WINDOWS # ifndef CARB_NO_MALLOC_FREE # include "malloc.h" # endif # include <intrin.h> # elif CARB_PLATFORM_LINUX # include <alloca.h> # include <signal.h> # define _alloca alloca # endif #endif // Architecture defines #ifdef DOXYGEN_BUILD //! A macro defined as `1` if compilation is targeting the AArch64 platform; `0` otherwise. May not be overridden on the //! command line or by defining before including this file. Set to `1` if `__aarch64__` is defined, `0` if `__x86_64__` //! or `_M_X64` are defined, and left undefined otherwise. # define CARB_AARCH64 0 //! A macro defined as `1` if compilation is targeting the x86-64 platform; `0` otherwise. May not be overridden on the //! command line or by defining before including this file. Set to `0` if `__aarch64__` is defined, `1` if `__x86_64__` //! or `_M_X64` are defined, and left undefined otherwise. # define CARB_X86_64 1 //! 
The name of the current architecture as a string. # define CARB_ARCH_NAME #elif defined(__aarch64__) # define CARB_AARCH64 1 # define CARB_X86_64 0 #elif defined(__x86_64__) /*GCC*/ || defined(_M_X64) /*MSVC*/ # define CARB_X86_64 1 # define CARB_AARCH64 0 #endif #if CARB_PLATFORM_MACOS # define CARB_ARCH_NAME "universal" #else # if CARB_X86_64 # define CARB_ARCH_NAME "x86_64" # elif CARB_AARCH64 # define CARB_ARCH_NAME "aarch64" # endif #endif #ifndef CARB_PROFILING //! When set to a non-zero value, profiling macros in \a include/carb/profiler/Profile.h will report to the profiler; //! otherwise the profiling macros have no effect. Always set to `1` by default, but may be overridden by defining a //! different value before including this file or by specifying a different value on the compiler command line. # define CARB_PROFILING 1 #endif #ifdef DOXYGEN_BUILD //! A macro defined as `1` if compilation is targeting the Tegra platform. By default set to `1` only if `__aarch64__` //! and `__LINARO_RELEASE__` are defined; `0` otherwise. May be overridden by defining a different value before //! including this file or by specifying a different value on the compiler command line. # define CARB_TEGRA 0 #elif !defined(CARB_TEGRA) # if defined(__aarch64__) && defined(__LINARO_RELEASE__) # define CARB_TEGRA 1 # else # define CARB_TEGRA 0 # endif #endif #ifdef DOXYGEN_BUILD //! A macro defined as `1` if compilation is using Microsoft Visual C++, that is, if `_MSC_VER` is defined. May be //! overridden by defining a different value before including this file or by specifying a different value on the //! compiler command line, however, only one of `CARB_COMPILER_MSC` and `CARB_COMPILER_GNUC` must be set to `1`; the //! other macro(s) must be set to `0`. # define CARB_COMPILER_MSC 0 //! A macro defined as `1` if compilation is using GNU C Compiler (GCC), that is, if `_MSC_VER` is not defined but //! `__GNUC__` is defined. 
May be overridden by defining a different value before including this file or by specifying a //! different value on the compiler command line, however, only one of `CARB_COMPILER_MSC` and `CARB_COMPILER_GNUC` must //! be set to `1`; the other macro(s) must be set to `0`. # define CARB_COMPILER_GNUC 1 #elif defined(CARB_COMPILER_MSC) && defined(CARB_COMPILER_GNUC) # if (!!CARB_COMPILER_MSC) + (!!CARB_COMPILER_GNUC) != 1 # define CARB_COMPILER_MSC // Show previous definition # define CARB_COMPILER_GNUC // Show previous definition # error Exactly one of CARB_COMPILER_MSC or CARB_COMPILER_GNUC must be non-zero. # endif #elif !defined(CARB_COMPILER_MSC) && !defined(CARB_COMPILER_GNUC) # ifndef CARB_COMPILER_MSC # if defined(_MSC_VER) # define CARB_COMPILER_MSC 1 # define CARB_COMPILER_GNUC 0 # elif defined(__GNUC__) # define CARB_COMPILER_MSC 0 # define CARB_COMPILER_GNUC 1 # else # error "Unsupported compiler." # endif # endif #else # error "Must define CARB_COMPILER_MSC and CARB_COMPILER_GNUC or neither." #endif #ifdef DOXYGEN_BUILD //! A macro defined as `1` if a Clang-infrastructure toolchain is building the current file, that is, if `__clang__` is //! defined; `0` if not. May be overridden by defining a different value before including this file or by specifying a //! different value on the compiler command line. //! @note It is legal to have \ref CARB_COMPILER_MSC and \ref CARB_TOOLCHAIN_CLANG both as `1` simultaneously, which //! represents a Clang-infrastructure toolchain running in Microsoft compatibility mode. # define CARB_TOOLCHAIN_CLANG 0 #elif !defined(CARB_TOOLCHAIN_CLANG) # if defined(__clang__) # define CARB_TOOLCHAIN_CLANG 1 # else # define CARB_TOOLCHAIN_CLANG 0 # endif #endif #ifdef DOXYGEN_BUILD //! A macro defined as `1` if the toolchain is building the current file with `-fsanitize=address`, that is, if //! `__SANITIZE_ADDRESS__` is defined; `0` otherwise. May be overridden by defining a different value before including //! 
this file or by specifying a different value on the compiler command line. Microsoft Visual Studio supports address //! sanitizer starting with 2019 (v16.9) by specifying `/fsanitize=address` on the compiler command line. //! See https://learn.microsoft.com/en-us/cpp/sanitizers/asan?view=msvc-160 # define CARB_ASAN_ENABLED 0 #elif !defined(CARB_ASAN_ENABLED) # ifdef __SANITIZE_ADDRESS__ # define CARB_ASAN_ENABLED __SANITIZE_ADDRESS__ # else # define CARB_ASAN_ENABLED 0 # endif #endif //! De-parenthesize the contents of \c pack_. `CARB_DEPAREN((x, y))` becomes `x, y`. An unparenthesized pack will cause //! cause a compilation failure; e.g.: `CARB_DEPAREN(foo)` will not work, but `CARB_DEPAREN((foo))` will. #define CARB_DEPAREN(pack_) CARB_IDENTITY pack_ //! Return exactly the arguments. This is useful for expansion of the tokens. #define CARB_IDENTITY(...) __VA_ARGS__ // Compiler specific defines. Exist for all supported compilers but may be a no-op for certain compilers. #ifdef DOXYGEN_BUILD //! Acts as a `char[]` with the current full function signature. # define CARB_PRETTY_FUNCTION "<function signature here>" //! GCC only, defined as `__attribute__((__VA_ARGS__))`; ignored on non-GCC compilers. # define CARB_ATTRIBUTE(...) //! MSVC only, defined as `__declspec(__VA_ARGS__)`; ignored on non-MSVC compilers. # define CARB_DECLSPEC(...) //! MSVC only, defined as `__VA_ARGS__`; ignored on non-MSVC compilers. # define CARB_MSC_ONLY(...) //! Only non-MSVC compilers, defined as `__VA_ARGS__`; ignored on MSVC. # define CARB_NOT_MSC(...) //! GCC only, defined as `gnuc_only_block`; ignored on non-GCC compilers. # define CARB_GNUC_ONLY(...) //! Only non-GCC compilers, defined as `__VA_ARGS__`; ignored on GCC. # define CARB_NOT_GNUC(...) //! Generic pragma, only to be used for pragmas that are the same on all supported compilers. //! @see CARB_PRAGMA_MSC //! @see CARB_PRAGMA_GNUC # define CARB_PRAGMA(...) //! 
MSVC only, defined as `__pragma(__VA_ARGS__)`; ignored on non-MSVC compilers. # define CARB_PRAGMA_MSC(...) //! GCC only, defined as `_Pragma(__VA_ARGS__)`; ignored on non-GCC compilers. # define CARB_PRAGMA_GNUC(...) //! Macro to work around Exhale tripping over `constexpr` sometimes and reporting things like: //! `Invalid C++ declaration: Expected identifier in nested name, got keyword: static` # define CARB_DOC_CONSTEXPR const //! Indicates whether exceptions are enabled for the current compilation unit. Value depends on parameters passed to the //! compiler. # define CARB_EXCEPTIONS_ENABLED 1 //! Conditionally includes text only when documenting (i.e. when `DOXYGEN_BUILD` is defined). //! @param ... The text to include if documenting # define CARB_DOC_ONLY(...) __VA_ARGS__ //! Declares a value or statement in a way that prevents Doxygen and Sphinx from getting confused //! about matching symbols. There seems to be a bug in Sphinx that prevents at least templated //! symbols from being matched to the ones generated by Doxygen when keywords such as `decltype` //! are used. This is effectively the opposite operation as CARB_DOC_ONLY(). # define CARB_NO_DOC(...) #else # define CARB_DOC_CONSTEXPR constexpr # define CARB_DOC_ONLY(...) # define CARB_NO_DOC(...) __VA_ARGS__ # if CARB_COMPILER_MSC # define CARB_PRETTY_FUNCTION __FUNCSIG__ # define CARB_ATTRIBUTE(...) # define CARB_MSC_ONLY(...) __VA_ARGS__ # define CARB_NOT_MSC(...) # define CARB_GNUC_ONLY(...) # define CARB_NOT_GNUC(...) __VA_ARGS__ # define CARB_PRAGMA(...) __pragma(__VA_ARGS__) # define CARB_DECLSPEC(...) __declspec(__VA_ARGS__) # define CARB_PRAGMA_MSC(...) CARB_PRAGMA(__VA_ARGS__) # define CARB_PRAGMA_GNUC(...) 
# ifdef __cpp_exceptions # define CARB_EXCEPTIONS_ENABLED 1 # else # define CARB_EXCEPTIONS_ENABLED 0 # endif // Other MSC-specific definitions that must exist outside of the carb namespace extern "C" void _mm_prefetch(char const* _A, int _Sel); // From winnt.h/intrin.h # if defined(__INTELLISENSE__) && _MSC_VER < 1920 // See: https://stackoverflow.com/questions/61485127/including-windows-h-causes-unknown-attributeno-init-all-error # define no_init_all deprecated # endif # elif CARB_COMPILER_GNUC # define CARB_PRETTY_FUNCTION __PRETTY_FUNCTION__ # define CARB_ATTRIBUTE(...) __attribute__((__VA_ARGS__)) # define CARB_DECLSPEC(...) # define CARB_MSC_ONLY(...) # define CARB_NOT_MSC(...) __VA_ARGS__ # define CARB_GNUC_ONLY(...) __VA_ARGS__ # define CARB_NOT_GNUC(...) # define CARB_PRAGMA(...) _Pragma(__VA_ARGS__) # define CARB_PRAGMA_MSC(...) # define CARB_PRAGMA_GNUC(...) CARB_PRAGMA(__VA_ARGS__) # ifdef __EXCEPTIONS # define CARB_EXCEPTIONS_ENABLED 1 # else # define CARB_EXCEPTIONS_ENABLED 0 # endif # else # error Unsupported compiler # endif #endif #if defined(DOXYGEN_BUILD) || defined(OMNI_BIND) //! Turns optimizations off at the function level until a CARB_OPTIMIZE_ON_MSC() call is seen. //! This must be called outside of the body of any function and will remain in effect until //! either a CARB_OPTIMIZE_ON_MSC() call is seen or the end of the translation unit. This //! unfortunately needs to be a separate set of macros versus the one for GCC and Clang due //! to the different style of disabling and enabling optimizations under the MSC compiler. # define CARB_OPTIMIZE_OFF_MSC() //! Restores previous optimizations that were temporarily disable due to an earlier call to //! CARB_OPTIMIZE_OFF_MSC(). This must be called outside the body of any function. If this //! call is not made, the previous optimization state will remain until the end of the current //! translation unit. # define CARB_OPTIMIZE_ON_MSC() //! 
Disables optimizations for the function that is tagged with this attribute. This only //! affects the single function that it tags. Optimizations will be restored to the previous //! settings for the translation unit outside of the tagged function. # define CARB_NO_OPTIMIZE_GNUC_CLANG() #else # if CARB_COMPILER_MSC # define CARB_OPTIMIZE_OFF_MSC() CARB_PRAGMA_MSC(optimize("", off)) # define CARB_OPTIMIZE_ON_MSC() CARB_PRAGMA_MSC(optimize("", on)) # define CARB_NO_OPTIMIZE_GNUC_CLANG() # elif CARB_TOOLCHAIN_CLANG # define CARB_NO_OPTIMIZE_GNUC_CLANG() CARB_ATTRIBUTE(optnone) # define CARB_OPTIMIZE_OFF_MSC() # define CARB_OPTIMIZE_ON_MSC() # elif CARB_COMPILER_GNUC # define CARB_NO_OPTIMIZE_GNUC_CLANG() CARB_ATTRIBUTE(optimize("-O0")) # define CARB_OPTIMIZE_OFF_MSC() # define CARB_OPTIMIZE_ON_MSC() # else # error Unsupported compiler # endif #endif // MSC-specific warning macros are defined only for MSC // CARB_IGNOREWARNING_MSC_PUSH: MSVC only; pushes the warning state // CARB_IGNOREWARNING_MSC_POP: MSVC only; pops the warning state // CARB_IGNOREWARNING_MSC(w): MSVC only; disables the given warning number (ex: CARB_IGNOREWARNING_MSC(4505)) // CARB_IGNOREWARNING_MSC_WITH_PUSH(w): MSVC only; combines CARB_IGNOREWARNING_MSC_PUSH and CARB_IGNOREWARNING_MSC() #if !defined(DOXYGEN_BUILD) && CARB_COMPILER_MSC # define CARB_IGNOREWARNING_MSC_PUSH __pragma(warning(push)) # define CARB_IGNOREWARNING_MSC_POP __pragma(warning(pop)) # define CARB_IGNOREWARNING_MSC(w) __pragma(warning(disable : w)) # define CARB_IGNOREWARNING_MSC_WITH_PUSH(w) \ CARB_IGNOREWARNING_MSC_PUSH \ CARB_IGNOREWARNING_MSC(w) #else //! For MSVC only, pushes the current compilation warning configuration. Defined as `__pragma(warning(push))` for MSVC //! only; ignored by other compilers. # define CARB_IGNOREWARNING_MSC_PUSH //! For MSVC only, pops the compilation warning configuration previously pushed with \ref CARB_IGNOREWARNING_MSC_PUSH, //! overwriting the current state. 
Defined as `__pragma(warning(pop))` for MSVC only; ignored by other compilers. # define CARB_IGNOREWARNING_MSC_POP //! For MSVC only, disables a specific compiler warning for the current compilation warning configuration. Defined as //! `__pragma(warning(disable : <w>))` for MSVC only; ignored by other compilers. //! @param w The warning number to disable. # define CARB_IGNOREWARNING_MSC(w) //! Syntactic sugar for \ref CARB_IGNOREWARNING_MSC_PUSH followed by \ref CARB_IGNOREWARNING_MSC. //! @param w The warning number to disable. # define CARB_IGNOREWARNING_MSC_WITH_PUSH(w) #endif // GNUC-specific helper macros are defined for GCC and Clang-infrastructure // CARB_IGNOREWARNING_GNUC_PUSH: GCC only; pushes the warning state // CARB_IGNOREWARNING_GNUC_POP: GCC only; pops the warning state // CARB_IGNOREWARNING_CLANG_PUSH: Clang only; pushes the warning state // CARB_IGNOREWARNING_CLANG_POP: Clang only; pops the warning state // CARB_IGNOREWARNING_GNUC(w): GCC only; disables the given warning (ex: CARB_IGNOREWARNING_GNUC("-Wattributes")) // CARB_IGNOREWARNING_GNUC_WITH_PUSH(w): GCC only; combines CARB_IGNOREWARNING_GNUC_PUSH and CARB_IGNOREWARNING_GNUC() // CARB_IGNOREWARNING_CLANG(w): Clang only; disables the given warning (ex: CARB_IGNOREWARNING_CLANG("-Wattributes")) // CARB_IGNOREWARNING_CLANG_WITH_PUSH(w): Clang only; combines CARB_IGNOREWARNING_CLANG_PUSH and // CARB_IGNOREWARNING_CLANG() #if !defined(DOXYGEN_BUILD) && (CARB_COMPILER_GNUC || CARB_TOOLCHAIN_CLANG) # define CARB_IGNOREWARNING_GNUC_PUSH _Pragma("GCC diagnostic push") # define CARB_IGNOREWARNING_GNUC_POP _Pragma("GCC diagnostic pop") # define INTERNAL_CARB_IGNOREWARNING_GNUC(str) _Pragma(# str) # define CARB_IGNOREWARNING_GNUC(w) INTERNAL_CARB_IGNOREWARNING_GNUC(GCC diagnostic ignored w) # define CARB_IGNOREWARNING_GNUC_WITH_PUSH(w) CARB_IGNOREWARNING_GNUC_PUSH CARB_IGNOREWARNING_GNUC(w) # if CARB_TOOLCHAIN_CLANG # define CARB_IGNOREWARNING_CLANG_PUSH _Pragma("GCC diagnostic push") # define 
CARB_IGNOREWARNING_CLANG_POP _Pragma("GCC diagnostic pop") # define INTERNAL_CARB_IGNOREWARNING_CLANG(str) _Pragma(# str) # define CARB_IGNOREWARNING_CLANG(w) INTERNAL_CARB_IGNOREWARNING_CLANG(GCC diagnostic ignored w) # define CARB_IGNOREWARNING_CLANG_WITH_PUSH(w) CARB_IGNOREWARNING_CLANG_PUSH CARB_IGNOREWARNING_CLANG(w) # else # define CARB_IGNOREWARNING_CLANG_PUSH # define CARB_IGNOREWARNING_CLANG_POP # define CARB_IGNOREWARNING_CLANG(w) # define CARB_IGNOREWARNING_CLANG_WITH_PUSH(w) # endif #else //! For GCC only, pushes the current compilation warning configuration. Defined as `_Pragma("GCC diagnostic push")` for //! GCC only; ignored by other compilers. # define CARB_IGNOREWARNING_GNUC_PUSH //! For GCC only, pops the compilation warning configuration previously pushed with \ref CARB_IGNOREWARNING_GNUC_PUSH, //! overwriting the current state. Defined as `_Pragma("GCC diagnostic pop")` for GCC only; ignored by other compilers. # define CARB_IGNOREWARNING_GNUC_POP //! For Clang only, pushes the current compilation warning configuration. Defined as `_Pragma("GCC diagnostic push")` //! for Clang only; ignored by other compilers. # define CARB_IGNOREWARNING_CLANG_PUSH //! For Clang only, pops the compilation warning configuration previously pushed with \ref //! CARB_IGNOREWARNING_CLANG_PUSH, overwriting the current state. Defined as `_Pragma("GCC diagnostic pop")` for Clang //! only; ignored by other compilers. # define CARB_IGNOREWARNING_CLANG_POP //! For GCC only, disables a specific compiler warning for the current compilation warning configuration. Defined as //! `_Pragma("GCC diagnostic ignored <warning>")` for GCC only; ignored by other compilers. //! @param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified) # define CARB_IGNOREWARNING_GNUC(w) //! Syntactic sugar for \ref CARB_IGNOREWARNING_GNUC_PUSH followed by \ref CARB_IGNOREWARNING_GNUC. //! 
@param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified) # define CARB_IGNOREWARNING_GNUC_WITH_PUSH(w) //! For Clang only, disables a specific compiler warning for the current compilation warning configuration. Defined as //! `_Pragma("GCC diagnostic ignored <warning>")` for Clang only; ignored by other compilers. //! @param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified) # define CARB_IGNOREWARNING_CLANG(w) //! Syntactic sugar for \ref CARB_IGNOREWARNING_CLANG_PUSH followed by \ref CARB_IGNOREWARNING_CLANG. //! @param w The warning to disable, example: `"-Wattributes"` (note that quotes must be specified) # define CARB_IGNOREWARNING_CLANG_WITH_PUSH(w) #endif #if defined(__cplusplus) || defined(DOXYGEN_BUILD) //! Defined as `extern "C"` for C++ compilation, that is, when `__cplusplus` is defined; empty define otherwise. # define CARB_EXTERN_C extern "C" #else # define CARB_EXTERN_C #endif //! Grants a function external linkage in a dynamic library or executable. //! //! On MSVC, `extern "C" __declspec(dllexport)`. On GCC/Clang: `extern "C" __attribute__((visibility("default")))`. //! //! This macro is always defined as such. If conditional import/export is desired, use \ref CARB_DYNAMICLINK. #define CARB_EXPORT CARB_EXTERN_C CARB_DECLSPEC(dllexport) CARB_ATTRIBUTE(visibility("default")) //! Imports a function with external linkage from a shared object or DLL. //! //! On all compilers: `extern "C"` //! //! \note on Windows platforms we do not use `__declspec(dllimport)` as it is <a //! href="https://learn.microsoft.com/en-us/cpp/build/importing-into-an-application-using-declspec-dllimport?view=msvc-160">optional</a> //! and can lead to linker warning <a //! href="https://learn.microsoft.com/en-us/cpp/error-messages/tool-errors/linker-tools-warning-lnk4217?view=msvc-160">LNK4217</a>. #define CARB_IMPORT CARB_EXTERN_C // For documentation only #ifdef DOXYGEN_BUILD //! 
Instructs CARB_DYNAMICLINK to export instead of import //! //! \warning This symbol is not defined anywhere; it is up to the user of \ref CARB_DYNAMICLINK to define this in the //! compilation unit that exports the symbols. **This must be defined before carb/Defines.h is included.** //! //! \see CARB_DYNAMICLINK # define CARB_EXPORTS #endif #if defined(CARB_EXPORTS) || defined(DOXYGEN_BUILD) //! Conditional (import/export) dynamic linking. //! //! If and only if \ref CARB_EXPORTS is defined before including this file, this will match \ref CARB_EXPORT and //! function as granting a function external linkage. If `CARB_EXPORTS` is not defined, this functions as merely //! declaring the function as `extern "C"` so that it can be imported. # define CARB_DYNAMICLINK CARB_EXPORT #else # define CARB_DYNAMICLINK CARB_IMPORT #endif #if CARB_PLATFORM_WINDOWS || defined(DOXYGEN_BUILD) //! Defined as `__cdecl` on Windows and an empty define on Linux. Used to explicitly state ABI calling convention for //! API functions. # define CARB_ABI __cdecl #else # define CARB_ABI #endif #if (defined(__cplusplus) && __cplusplus >= 201400L) || defined(DOXYGEN_BUILD) //! Defined as `1` if the current compiler supports C++14; `0` otherwise. C++14 is the minimum required for using //! Carbonite (though building Carbonite requires C++17). # define CARB_HAS_CPP14 1 #else # define CARB_HAS_CPP14 0 #endif #if (defined(__cplusplus) && __cplusplus >= 201700L) || defined(DOXYGEN_BUILD) //! Defined as `1` if the current compiler supports C++17; `0` otherwise. # define CARB_HAS_CPP17 1 #else # define CARB_HAS_CPP17 0 #endif #if (defined(__cplusplus) && __cplusplus >= 202000L) || defined(DOXYGEN_BUILD) //! Defined as `1` if the current compiler supports C++20; `0` otherwise. # define CARB_HAS_CPP20 1 #else # define CARB_HAS_CPP20 0 #endif // [[nodiscard]] #if CARB_HAS_CPP17 || defined(DOXYGEN_BUILD) //! Defined as `[[nodiscard]]` if the current compiler supports C++17. 
This reverts to \c warn_unused_result attribute //! where it is available and will be empty if it is not. # define CARB_NODISCARD [[nodiscard]] //! Defined as `[[nodiscard]]` if the current compiler supports C++17 and is empty otherwise. This operates similar to //! \c CARB_NODISCARD but is meant to be used on type definitions, as the \c warn_unused_result fallback is not //! supported for types. # define CARB_NODISCARD_TYPE [[nodiscard]] #elif CARB_COMPILER_GNUC # define CARB_NODISCARD __attribute__((warn_unused_result)) # define CARB_NODISCARD_TYPE #else // not supported # define CARB_NODISCARD # define CARB_NODISCARD_TYPE #endif // [[nodiscard(msg)]] #if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD) //! Defined as `[[nodiscard(msg)]]` if the current compiler supports C++20; falls back to \c CARB_NODISCARD without the //! message pre-C++20. # define CARB_NODISCARD_MSG(msg) [[nodiscard(msg)]] //! Defined as `[[nodiscard(msg)]]` if the current compiler supports C++20; falls back to \c CARB_NODISCARD_TYPE without //! the message pre-C++20. # define CARB_NODISCARD_TYPE_MSG(msg) [[nodiscard(msg)]] #else # define CARB_NODISCARD_MSG(msg) CARB_NODISCARD # define CARB_NODISCARD_TYPE_MSG(msg) CARB_NODISCARD_TYPE #endif // [[fallthrough]] #if CARB_HAS_CPP17 || defined(DOXYGEN_BUILD) //! Defined as `[[fallthrough]]` if the current compiler supports C++17; empty otherwise. # define CARB_FALLTHROUGH [[fallthrough]] #elif CARB_COMPILER_GNUC # if __GNUC__ >= 7 # define CARB_FALLTHROUGH __attribute__((fallthrough)) # else // Marker comment # define CARB_FALLTHROUGH /* fall through */ # endif #else // not supported # define CARB_FALLTHROUGH #endif // [[maybe_unused]] #if CARB_HAS_CPP17 && !defined(DOXYGEN_BUILD) # define CARB_MAYBE_UNUSED [[maybe_unused]] # define CARB_CPP17_CONSTEXPR constexpr #elif CARB_COMPILER_GNUC && !defined(DOXYGEN_BUILD) # define CARB_MAYBE_UNUSED __attribute__((unused)) # define CARB_CPP17_CONSTEXPR #else // not supported //! 
Defined as `[[maybe_unused]]` if the current compiler supports C++17; empty otherwise. # define CARB_MAYBE_UNUSED //! Defined as `constexpr` if the current compiler supports C++17; empty otherwise. # define CARB_CPP17_CONSTEXPR #endif // [[likely]] / [[unlikely]] #if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD) //! Defined as `([[likely]] !!(<expr>))` if the current compiler supports C++20. If the current compiler is GCC, as a //! fallback, `__builtin_expect(!!(<expr>), 1)` will be used. Otherwise, defined as `(!!(<expr>))` //! @param expr The expression to evaluate, optimized with a `true` outcome likely and expected. //! @returns The boolean result of \p expr. # define CARB_LIKELY(expr) ([[likely]] !!(expr)) //! Defined as `([[unlikely]] !!(<expr>))` if the current compiler supports C++20. If the current compiler is GCC, as a //! fallback, `__builtin_expect(!!(<expr>), 0)` will be used. Otherwise, defined as `(!!(<expr>))` //! @param expr The expression to evaluate, optimized with a `false` outcome likely and expected. //! @returns The boolean result of \p expr. # define CARB_UNLIKELY(expr) ([[unlikely]] !!(expr)) #elif CARB_COMPILER_GNUC # define CARB_LIKELY(expr) __builtin_expect(!!(expr), 1) # define CARB_UNLIKELY(expr) __builtin_expect(!!(expr), 0) #else // not supported # define CARB_LIKELY(expr) (!!(expr)) # define CARB_UNLIKELY(expr) (!!(expr)) #endif // [[no_unique_address]] #if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD) //! Defined as `[[no_unique_address]]` if the current compiler supports C++20; empty otherwise. # define CARB_NO_UNIQUE_ADDRESS [[no_unique_address]] #else // not supported # define CARB_NO_UNIQUE_ADDRESS #endif //! Syntactic sugar for `CARB_ATTRIBUTE(visibility("hidden"))`; ignored on compilers other than GCC. #define CARB_HIDDEN CARB_ATTRIBUTE(visibility("hidden")) //! Syntactic sugar for `CARB_DECLSPEC(selectany) CARB_ATTRIBUTE(weak)`, used to enable weak linking. 
#define CARB_WEAKLINK CARB_DECLSPEC(selectany) CARB_ATTRIBUTE(weak)

// constexpr in CPP20, but not before
#if CARB_HAS_CPP20 || defined(DOXYGEN_BUILD)
//! Defined as `constexpr` if the current compiler supports C++20; empty otherwise.
# define CARB_CPP20_CONSTEXPR constexpr
#else
# define CARB_CPP20_CONSTEXPR
#endif

// include the IAssert interface here. Note that this cannot be included any earlier because
// it requires symbols such as "CARB_ABI". Also note that it cannot be put into the CARB_DEBUG
// section below because the mirroring tool picks it up and generates type information for it.
// If it is not unconditionally included here, that leads to build errors in release builds.
#include "assert/IAssert.h"

#ifdef DOXYGEN_BUILD
//! On Windows platforms, defined as `__debugbreak()`; on Linux, `raise(SIGTRAP)`. Used to break into the debugger.
# define CARB_BREAK_POINT()
#elif CARB_POSIX
# define CARB_BREAK_POINT() ::raise(SIGTRAP)
#elif CARB_PLATFORM_WINDOWS
# define CARB_BREAK_POINT() ::__debugbreak()
#else
CARB_UNSUPPORTED_PLATFORM();
#endif

namespace carb
{

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
// clang-format off
// Shorthand for one table entry; #undef'd immediately after the tables below.
#define C(a) (unsigned char)(0x##a)
// ASCII case-conversion lookup table: maps 'A'-'Z' (0x41-0x5A) to 'a'-'z' (0x61-0x7A);
// every other byte value maps to itself.
constexpr unsigned char lowerTable[256] = {
    C(00), C(01), C(02), C(03), C(04), C(05), C(06), C(07), C(08), C(09), C(0A), C(0B), C(0C), C(0D), C(0E), C(0F),
    C(10), C(11), C(12), C(13), C(14), C(15), C(16), C(17), C(18), C(19), C(1A), C(1B), C(1C), C(1D), C(1E), C(1F),
    C(20), C(21), C(22), C(23), C(24), C(25), C(26), C(27), C(28), C(29), C(2A), C(2B), C(2C), C(2D), C(2E), C(2F),
    C(30), C(31), C(32), C(33), C(34), C(35), C(36), C(37), C(38), C(39), C(3A), C(3B), C(3C), C(3D), C(3E), C(3F),
    C(40),
    // [0x41, 0x5A] -> [0x61, 0x7A]
           C(61), C(62), C(63), C(64), C(65), C(66), C(67), C(68), C(69), C(6A), C(6B), C(6C), C(6D), C(6E), C(6F),
    C(70), C(71), C(72), C(73), C(74), C(75), C(76), C(77), C(78), C(79), C(7A), C(5B), C(5C), C(5D), C(5E), C(5F),
    C(60), C(61), C(62), C(63), C(64), C(65), C(66), C(67), C(68), C(69), C(6A), C(6B), C(6C), C(6D), C(6E), C(6F),
    C(70), C(71), C(72), C(73), C(74), C(75), C(76), C(77), C(78), C(79), C(7A), C(7B), C(7C), C(7D), C(7E), C(7F),
    C(80), C(81), C(82), C(83), C(84), C(85), C(86), C(87), C(88), C(89), C(8A), C(8B), C(8C), C(8D), C(8E), C(8F),
    C(90), C(91), C(92), C(93), C(94), C(95), C(96), C(97), C(98), C(99), C(9A), C(9B), C(9C), C(9D), C(9E), C(9F),
    C(A0), C(A1), C(A2), C(A3), C(A4), C(A5), C(A6), C(A7), C(A8), C(A9), C(AA), C(AB), C(AC), C(AD), C(AE), C(AF),
    C(B0), C(B1), C(B2), C(B3), C(B4), C(B5), C(B6), C(B7), C(B8), C(B9), C(BA), C(BB), C(BC), C(BD), C(BE), C(BF),
    C(C0), C(C1), C(C2), C(C3), C(C4), C(C5), C(C6), C(C7), C(C8), C(C9), C(CA), C(CB), C(CC), C(CD), C(CE), C(CF),
    C(D0), C(D1), C(D2), C(D3), C(D4), C(D5), C(D6), C(D7), C(D8), C(D9), C(DA), C(DB), C(DC), C(DD), C(DE), C(DF),
    C(E0), C(E1), C(E2), C(E3), C(E4), C(E5), C(E6), C(E7), C(E8), C(E9), C(EA), C(EB), C(EC), C(ED), C(EE), C(EF),
    C(F0), C(F1), C(F2), C(F3), C(F4), C(F5), C(F6), C(F7), C(F8), C(F9), C(FA), C(FB), C(FC), C(FD), C(FE), C(FF),
};
// ASCII case-conversion lookup table: maps 'a'-'z' (0x61-0x7A) to 'A'-'Z' (0x41-0x5A);
// every other byte value maps to itself.
constexpr unsigned char upperTable[256] = {
    C(00), C(01), C(02), C(03), C(04), C(05), C(06), C(07), C(08), C(09), C(0A), C(0B), C(0C), C(0D), C(0E), C(0F),
    C(10), C(11), C(12), C(13), C(14), C(15), C(16), C(17), C(18), C(19), C(1A), C(1B), C(1C), C(1D), C(1E), C(1F),
    C(20), C(21), C(22), C(23), C(24), C(25), C(26), C(27), C(28), C(29), C(2A), C(2B), C(2C), C(2D), C(2E), C(2F),
    C(30), C(31), C(32), C(33), C(34), C(35), C(36), C(37), C(38), C(39), C(3A), C(3B), C(3C), C(3D), C(3E), C(3F),
    C(40), C(41), C(42), C(43), C(44), C(45), C(46), C(47), C(48), C(49), C(4A), C(4B), C(4C), C(4D), C(4E), C(4F),
    C(50), C(51), C(52), C(53), C(54), C(55), C(56), C(57), C(58), C(59), C(5A), C(5B), C(5C), C(5D), C(5E), C(5F),
    C(60),
    // [0x61, 0x7A] -> [0x41, 0x5A]
           C(41), C(42), C(43), C(44), C(45), C(46), C(47), C(48), C(49), C(4A), C(4B), C(4C), C(4D), C(4E), C(4F),
    C(50), C(51), C(52), C(53), C(54), C(55), C(56), C(57), C(58), C(59), C(5A), C(7B), C(7C), C(7D), C(7E), C(7F),
    C(80), C(81), C(82), C(83), C(84), C(85), C(86), C(87), C(88), C(89), C(8A), C(8B), C(8C), C(8D), C(8E), C(8F),
    C(90), C(91), C(92), C(93), C(94), C(95), C(96), C(97), C(98), C(99), C(9A), C(9B), C(9C), C(9D), C(9E), C(9F),
    C(A0), C(A1), C(A2), C(A3), C(A4), C(A5), C(A6), C(A7), C(A8), C(A9), C(AA), C(AB), C(AC), C(AD), C(AE), C(AF),
    C(B0), C(B1), C(B2), C(B3), C(B4), C(B5), C(B6), C(B7), C(B8), C(B9), C(BA), C(BB), C(BC), C(BD), C(BE), C(BF),
    C(C0), C(C1), C(C2), C(C3), C(C4), C(C5), C(C6), C(C7), C(C8), C(C9), C(CA), C(CB), C(CC), C(CD), C(CE), C(CF),
    C(D0), C(D1), C(D2), C(D3), C(D4), C(D5), C(D6), C(D7), C(D8), C(D9), C(DA), C(DB), C(DC), C(DD), C(DE), C(DF),
    C(E0), C(E1), C(E2), C(E3), C(E4), C(E5), C(E6), C(E7), C(E8), C(E9), C(EA), C(EB), C(EC), C(ED), C(EE), C(EF),
    C(F0), C(F1), C(F2), C(F3), C(F4), C(F5), C(F6), C(F7), C(F8), C(F9), C(FA), C(FB), C(FC), C(FD), C(FE), C(FF),
};
#undef C
// clang-format on
} // namespace detail
#endif

/**
 * Assertion handler helper function. Do not call directly. Used by CARB_CHECK and CARB_ASSERT if the
 * `IAssert` interface is not available (i.e. the Framework is not instantiated). This function prints an "Assertion
 * failed" message to `stderr` by default.
 *
 * @param condition The condition from an assert in progress.
 * @param file The source file location from an assert in progress.
 * @param func The source file function name from an assert in progress.
 * @param line The source file line from an assert in progress.
 * @param fmt A `printf`-style format specifier string for the assert in progress.
 * @param ... Arguments corresponding to format specifiers in \p fmt.
 * @returns \c true if the software breakpoint should be triggered; \c false if a software breakpoint should be skipped.
 */
// NOTE(review): the two branches below emit different field orders ("file:func():line" with a format string vs.
// "file:line:func()" without) -- presumably unintentional; confirm before unifying since tools may parse the output.
inline bool assertHandlerFallback(
    const char* condition, const char* file, const char* func, int32_t line, const char* fmt = nullptr, ...)
{
    // Serialize concurrent failing asserts so interleaved threads do not garble the stderr output.
    static std::mutex m;
    std::lock_guard<std::mutex> g(m);

    if (fmt != nullptr)
    {
        fprintf(stderr, "%s:%s():%" PRId32 ": Assertion (%s) failed: ", file, func, line, condition);
        va_list args;
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);
        va_end(args);
        fputc('\n', stderr);
    }
    else
        fprintf(stderr, "%s:%" PRId32 ":%s(): Assertion (%s) failed.\n", file, line, func, condition);

    // Always request the software breakpoint when no IAssert handler is installed.
    return true;
}

} // namespace carb

#ifdef DOXYGEN_BUILD
//! Indicates whether asserts are enabled. May be overridden by defining this before including this file. By default, is
//! set to `1` if `CARB_DEBUG` is non-zero. If this is overridden to a non-zero value and `CARB_ASSERT` is not defined,
//! `CARB_ASSERT` will receive the default implementation.
# define CARB_ASSERT_ENABLED 0
//! Indicates whether runtime checking is enabled. May be overridden by defining this before including this file. By
//! default, is set to `1` always. If this is overridden to a non-zero value and `CARB_CHECK` is not defined,
//! `CARB_CHECK` will receive the default implementation.
# define CARB_CHECK_ENABLED 0
//! Optionally performs an assertion, by default for debug builds only.
//! @warning The \p cond should have no side effects! Asserts can be disabled which will cause \p cond to not be
//! evaluated.
//! @note The \ref CARB_ASSERT_ENABLED define can be used to determine if asserts are enabled, or to cause them to be
//! enabled or disabled by defining it before including this file.
//!
//! The implementation can be overridden on the command line, or by defining to a different implementation before
//! including this file.
//!
//! When \p cond produces a `false` result, the failure is reported to the `g_carbAssert` assertion handler, or if that
//! global variable is `nullptr`, calls \ref carb::assertHandlerFallback(). Depending on the result from that function
//! call, execution is allowed to continue, or `CARB_BREAK_POINT()` is invoked to notify the debugger.
//! @param cond A condition that is evaluated for a boolean result. If the condition produces \c false, the assert
//! handler is notified.
//! @param ... An optional printf-style format string and variadic parameters.
# define CARB_ASSERT(cond, ...) ((void)0)
//! Optionally performs a runtime check assertion, by default for both debug and release builds.
//! @warning The \p cond should have no side effects! Asserts can be disabled which will cause \p cond to not be
//! evaluated.
//! @note The \ref CARB_CHECK_ENABLED define can be used to determine if runtime check asserts are enabled, or to cause
//! them to be enabled or disabled by defining it before including this file.
//!
//! The implementation can be overridden on the command line, or by defining to a different implementation before
//! including this file.
//!
//! When \p cond produces a `false` result, the failure is reported to the `g_carbAssert` assertion handler, or if that
//! global variable is `nullptr`, calls \ref carb::assertHandlerFallback(). Depending on the result from that function
//! call, execution is allowed to continue, or `CARB_BREAK_POINT()` is invoked to notify the debugger.
//! @param cond A condition that is evaluated for a boolean result. If the condition produces \c false, the assert
//! handler is notified.
//! @param ... An optional printf-style format string and variadic parameters.
# define CARB_CHECK(cond, ...) ((void)0)
//! Terminates the application if a check fails.
//!
//! The implementation can be overridden on the command line, or by defining to a different implementation before
//! including this file.
//!
//! @warning The application is malformed and undefined behavior occurs if an overriding implementation of
//! `CARB_FATAL_UNLESS` allows continuing when \p cond returns false.
//! @param cond A condition that is evaluated for a boolean result. If the condition produces \c false, the assert
//! handler is notified. If the assert handler returns, `std::terminate()` is called.
//! @param fmt An explanation of the failure is required. This is a printf-style format string.
//! @param ... printf-style variadic parameters
# define CARB_FATAL_UNLESS(cond, fmt, ...) (!(cond) ? (std::terminate(), false) : true)
#else
/* main assertion test entry point. This is implemented as a single conditional statement to
 * ensure that the assertion failure breakpoint occurs on the same line of code as the assertion
 * test itself. CARB_CHECK() exists in release and debug, and CARB_ASSERT() is debug-only.
 */
// The lambda keeps the slow reporting path out of line; the `!`/`||` chain keeps everything one expression
// so CARB_BREAK_POINT() fires on the assertion's own source line.
// example-begin CARB_IMPL_ASSERT
# define CARB_IMPL_ASSERT(cond, ...)                                                                                   \
    (CARB_LIKELY(cond) ||                                                                                              \
     ![&](const char* funcname__, ...) CARB_NOINLINE {                                                                 \
         return g_carbAssert ?                                                                                         \
                    g_carbAssert->reportFailedAssertion(#cond, __FILE__, funcname__, __LINE__, ##__VA_ARGS__) :        \
                    ::carb::assertHandlerFallback(#cond, __FILE__, funcname__, __LINE__, ##__VA_ARGS__);               \
     }(CARB_PRETTY_FUNCTION) ||                                                                                        \
     (CARB_BREAK_POINT(), false))
// example-end CARB_IMPL_ASSERT

# ifndef CARB_CHECK
#  ifndef CARB_CHECK_ENABLED
#   define CARB_CHECK_ENABLED 1
#  endif
#  if CARB_CHECK_ENABLED
#   define CARB_CHECK(cond, ...) CARB_IMPL_ASSERT(cond, ##__VA_ARGS__)
#  else
#   define CARB_CHECK(cond, ...) ((void)0)
#  endif
# else // CARB_CHECK was already defined
#  ifndef CARB_CHECK_ENABLED
#   define CARB_CHECK /* cause an error showing where it was already defined */
#   error CARB_CHECK_ENABLED must also be defined if CARB_CHECK is pre-defined!
#  endif
# endif

# ifndef CARB_FATAL_UNLESS
// The dead `if (false) ::printf(fmt, ...)` forces the compiler to type-check the format string without executing it.
// example-begin CARB_FATAL_UNLESS
#  define CARB_FATAL_UNLESS(cond, fmt, ...)                                                                            \
      (CARB_LIKELY(cond) ||                                                                                            \
       ([&](const char* funcname__, ...) CARB_NOINLINE {                                                               \
           if (false)                                                                                                  \
               ::printf(fmt, ##__VA_ARGS__);                                                                           \
           g_carbAssert ?                                                                                              \
               g_carbAssert->reportFailedAssertion(#cond, __FILE__, funcname__, __LINE__, fmt, ##__VA_ARGS__) :        \
               ::carb::assertHandlerFallback(#cond, __FILE__, funcname__, __LINE__, fmt, ##__VA_ARGS__);               \
       }(CARB_PRETTY_FUNCTION), std::terminate(), false))
// example-end CARB_FATAL_UNLESS
# endif

# ifndef CARB_ASSERT
#  ifndef CARB_ASSERT_ENABLED
#   if CARB_DEBUG
#    define CARB_ASSERT_ENABLED 1
#   else
#    define CARB_ASSERT_ENABLED 0
#   endif
#  endif
#  if CARB_ASSERT_ENABLED
#   define CARB_ASSERT(cond, ...) CARB_IMPL_ASSERT(cond, ##__VA_ARGS__)
#  else
#   define CARB_ASSERT(cond, ...) ((void)0)
#  endif
# else // CARB_ASSERT was already defined
#  ifndef CARB_ASSERT_ENABLED
#   define CARB_ASSERT /* cause an error showing where it was already defined */
#   error CARB_ASSERT_ENABLED must also be defined if CARB_ASSERT is pre-defined!
#  endif
# endif
#endif

//! A helper to determine if the size and alignment of two given structures match, causing a static assert if unmatched.
//! @param A One type to compare.
//! @param B Another type to compare.
#define CARB_ASSERT_STRUCTS_MATCH(A, B)                                                                                \
    static_assert(                                                                                                     \
        sizeof(A) == sizeof(B) && alignof(A) == alignof(B), "Size or alignment mismatch between " #A " and " #B ".")

//! A helper to determine if member `A.a` matches the offset and size of `B.b`, causing a static assert if unmatched.
//! @param A The struct containing public member \p a.
//! @param a A public member of \p A.
//! @param B The struct containing public member \p b.
//! @param b A public member of \p B.
#define CARB_ASSERT_MEMBERS_MATCH(A, a, B, b)                                                                          \
    static_assert(offsetof(A, a) == offsetof(B, b) && sizeof(A::a) == sizeof(B::b),                                    \
                  "Offset or size mismatch between members " #a " of " #A " and " #b " of " #B ".")

//! The maximum value that can be represented by `uint16_t`.
#define CARB_UINT16_MAX UINT16_MAX
//! The maximum value that can be represented by `uint32_t`.
#define CARB_UINT32_MAX UINT32_MAX
//! The maximum value that can be represented by `uint64_t`.
#define CARB_UINT64_MAX UINT64_MAX
//! The maximum value that can be represented by `unsigned long long`.
#define CARB_ULLONG_MAX ULLONG_MAX
//! The maximum value that can be represented by `unsigned short`.
#define CARB_USHRT_MAX USHRT_MAX
//! The maximum value that can be represented by `float`.
#define CARB_FLOAT_MAX 3.402823466e+38F

//! A macro that returns the least of two values.
//! @warning This macro will evaluate parameters more than once! Consider using carb_min() or `std::min`.
//! @param a The first value.
//! @param b The second value.
//! @returns The least of \p a or \p b. If the values are equal \p b will be returned.
#define CARB_MIN(a, b) (((a) < (b)) ? (a) : (b))

//! A macro that returns the largest of two values.
//! @warning This macro will evaluate parameters more than once! Consider using carb_max() or `std::max`.
//! @param a The first value.
//! @param b The second value.
//! @returns The largest of \p a or \p b. If the values are equal \p b will be returned.
#define CARB_MAX(a, b) (((a) > (b)) ? (a) : (b))

//! A macro that clamps a value to the inclusive range [\p lo, \p hi].
//! @warning This macro will evaluate parameters more than once! Consider using `std::clamp` or an inline function
//! instead.
//! @param x The value to clamp.
//! @param lo The lowest acceptable value. This will be returned if `x < lo`.
//! @param hi The highest acceptable value. This will be returned if `x > hi`.
//! @return \p lo if \p x is less than \p lo; \p hi if \p x is greater than \p hi; \p x otherwise.
#define CARB_CLAMP(x, lo, hi) (((x) < (lo)) ? (lo) : (((x) > (hi)) ? (hi) : (x)))

//! Rounds a given value to the next highest multiple of another given value.
//! @warning This macro will evaluate the \p to parameter more than once! Consider using an inline function instead.
//! @param value The value to round.
//! @param to The multiple to round to.
//! @returns \p value rounded up to the next multiple of \p to.
#define CARB_ROUNDUP(value, to) ((((value) + (to)-1) / (to)) * (to))

#ifndef DOXYGEN_SHOULD_SKIP_THIS
// CARB_JOIN will join together `a` and `b` and also work properly if either parameter is another macro like __LINE__.
// This requires two macros since the preprocessor will only recurse macro expansion if # and ## are not present.
# define __CARB_JOIN(a, b) a##b
#endif
//! A macro that joins two parts to create one symbol allowing one or more parameters to be a macro, as if by the `##`
//! preprocessor operator.
//! Example: `CARB_JOIN(test, __LINE__)` on line 579 produces `test579`.
//! @param a The first name to join.
//! @param b The second name to join.
#define CARB_JOIN(a, b) __CARB_JOIN(a, b)

//! A macro that deletes the copy-construct and copy-assign functions for the given classname.
//! @param classname The class to delete copy functions for.
#define CARB_PREVENT_COPY(classname)                                                                                   \
    classname(const classname&) = delete; /**< @private */                                                             \
    classname& operator=(const classname&) = delete /**< @private */

//! A macro that deletes the move-construct and move-assign functions for the given classname.
//! @param classname The class to delete move functions for.
#define CARB_PREVENT_MOVE(classname)                                                                                   \
    classname(classname&&) = delete; /**< @private */                                                                  \
    classname& operator=(classname&&) = delete /**< @private */

//! Syntactic sugar for both \ref CARB_PREVENT_COPY and \ref CARB_PREVENT_MOVE.
//! @param classname The class to delete copy and move functions for.
#define CARB_PREVENT_COPY_AND_MOVE(classname)                                                                          \
    CARB_PREVENT_COPY(classname);                                                                                      \
    CARB_PREVENT_MOVE(classname)

#if defined(__COUNTER__) || defined(DOXYGEN_BUILD)
//! A helper macro that appends a number to the given name to create a unique name.
//! @param str The name to decorate.
# define CARB_ANONYMOUS_VAR(str) CARB_JOIN(str, __COUNTER__) #else # define CARB_ANONYMOUS_VAR(str) CARB_JOIN(str, __LINE__) #endif namespace carb { #ifndef DOXYGEN_SHOULD_SKIP_THIS template <typename T, size_t N> constexpr size_t countOf(T const (&)[N]) { return N; } #endif //! Returns the count of an array as a `size_t` at compile time. //! @param a The array to count. //! @returns The number of elements in \p a. #define CARB_COUNTOF(a) carb::countOf(a) #ifndef DOXYGEN_SHOULD_SKIP_THIS template <typename T, uint32_t N> constexpr uint32_t countOf32(T const (&)[N]) { return N; } #endif //! Returns the count of an array as a `uint32_t` at compile time. //! @param a The array to count. //! @returns The number of elements in \p a. #define CARB_COUNTOF32(a) carb::countOf32(a) #ifndef DOXYGEN_SHOULD_SKIP_THIS template <typename T, typename U> constexpr uint32_t offsetOf(U T::*member) { CARB_IGNOREWARNING_GNUC_PUSH # if CARB_TOOLCHAIN_CLANG && __clang_major__ >= 13 // this error is issued on clang 13 CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wnull-pointer-subtraction") # endif return (uint32_t)((char*)&((T*)nullptr->*member) - (char*)nullptr); CARB_IGNOREWARNING_GNUC_POP } #endif //! Returns the offset of a member of a class at compile time. //! @param a The member of a class. The member must have visibility to the call of `CARB_OFFSETOF`. The class is //! inferred. //! @returns The offset of \p a from its containing class, in bytes, as a `uint32_t`. #define CARB_OFFSETOF(a) carb::offsetOf(&a) #if CARB_COMPILER_MSC || defined(DOXYGEN_BUILD) //! Returns the required alignment of a type. //! @param T The type to determine alignment of. //! @returns The required alignment of \p T, in bytes. 
# define CARB_ALIGN_OF(T) __alignof(T)
#elif CARB_COMPILER_GNUC
# define CARB_ALIGN_OF(T) __alignof__(T)
#else
# error "Align of cannot be determined - compiler not known"
#endif

// Implement CARB_HARDWARE_PAUSE; a way of idling the pipelines and reducing the penalty
// from memory order violations. See
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors
#ifdef DOXYGEN_BUILD
//! Instructs the underlying hardware to idle the CPU pipelines and reduce the penalty from memory order violations.
# define CARB_HARDWARE_PAUSE()
#elif CARB_X86_64
// avoid including immintrin.h
# if CARB_COMPILER_MSC
#  pragma intrinsic(_mm_pause)
#  define CARB_HARDWARE_PAUSE() _mm_pause()
# else
#  define CARB_HARDWARE_PAUSE() __builtin_ia32_pause()
# endif
#elif defined(__aarch64__)
# define CARB_HARDWARE_PAUSE() __asm__ __volatile__("yield" ::: "memory")
#else
CARB_UNSUPPORTED_PLATFORM();
#endif

#if CARB_COMPILER_MSC || defined(DOXYGEN_BUILD)
# pragma intrinsic(_mm_prefetch)
//! Instructs the compiler to force inline of the decorated function
# define CARB_ALWAYS_INLINE __forceinline
//! Attempts to prefetch from memory using a compiler intrinsic.
//! @param addr The address to prefetch
//! @param write Pass `true` if writing to the address is intended; `false` otherwise.
//! @param level The `carb::PrefetchLevel` hint.
# define CARB_PREFETCH(addr, write, level) _mm_prefetch(reinterpret_cast<char*>(addr), int(level))

//! A prefetch level hint to pass to \ref CARB_PREFETCH()
enum class PrefetchLevel
{
    kHintNonTemporal = 0, //!< prefetch data into non-temporal cache structure and into a location close to the
                          //!< processor, minimizing cache pollution.
    kHintL1 = 1, //!< prefetch data into all levels of the cache hierarchy.
    kHintL2 = 2, //!< prefetch data into level 2 cache and higher.
    kHintL3 = 3, //!< prefetch data into level 3 cache and higher, or an implementation specific choice.
};
#elif CARB_COMPILER_GNUC
# define CARB_ALWAYS_INLINE CARB_ATTRIBUTE(always_inline)
# define CARB_PREFETCH(addr, write, level) __builtin_prefetch((addr), (write), int(level))
// NOTE(review): the enumerator values intentionally differ from the MSC variant above: here they are passed
// as __builtin_prefetch's locality argument, whose documented scale is inverted (3 = keep in all caches,
// 0 = non-temporal) relative to _mm_prefetch's hint constants -- confirm against the GCC builtin docs.
enum class PrefetchLevel
{
    kHintNonTemporal = 0, //!< no temporal locality; corresponds to the MSC kHintNonTemporal.
    kHintL1 = 3, //!< highest locality; corresponds to the MSC kHintL1.
    kHintL2 = 2, //!< moderate locality; corresponds to the MSC kHintL2.
    kHintL3 = 1, //!< low locality; corresponds to the MSC kHintL3.
};
#else
CARB_UNSUPPORTED_PLATFORM();
#endif

//! A macro that declares that a function may not be inlined.
#define CARB_NOINLINE CARB_ATTRIBUTE(noinline) CARB_DECLSPEC(noinline)

#ifdef DOXYGEN_BUILD
//! Declares a function as deprecated.
# define CARB_DEPRECATED(msg)
//! Declares a file as deprecated.
# define CARB_FILE_DEPRECATED
//! Declares that a function will not throw any exceptions
# define CARB_NOEXCEPT throw()
//! Used when declaring opaque types to prevent Doxygen from getting confused about not finding any implementation.
# define DOXYGEN_EMPTY_CLASS                                                                                           \
     {                                                                                                                 \
     }
#else
# define CARB_DEPRECATED(msg) CARB_ATTRIBUTE(deprecated(msg)) CARB_DECLSPEC(deprecated(msg))
# ifdef CARB_IGNORE_REMOVEFILE_WARNINGS
#  define CARB_FILE_DEPRECATED
#  define CARB_FILE_DEPRECATED_MSG(...)
# else
// The \x1b[33m / \x1b[0m escapes colorize the build-time message yellow on ANSI-capable terminals.
#  define CARB_FILE_DEPRECATED_MSG(msg)                                                                                \
      CARB_PRAGMA(message("\x1b[33m" __FILE__ ":" CARB_STRINGIFY(                                                      \
          __LINE__) ": " msg " (#define CARB_IGNORE_REMOVEFILE_WARNINGS to ignore these warnings)\x1b[0m"))            \
      CARB_PRAGMA(warning_see_message)
#  define CARB_FILE_DEPRECATED CARB_FILE_DEPRECATED_MSG("This file is no longer needed and will be removed soon")
# endif
# define CARB_NOEXCEPT noexcept
# define DOXYGEN_EMPTY_CLASS
#endif

#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Rounds an integral value up to the next multiple of `alignment`.
// NOTE(review): divides by `alignment`; passing 0 is undefined -- callers must guarantee a non-zero alignment.
template <typename T>
constexpr T align(T x, size_t alignment)
{
    return (T)(((size_t)x + alignment - 1) / alignment * alignment);
}
// Pointer overload: rounds the address up to the next multiple of `alignment`.
template <typename T>
T* align(T* x, size_t alignment)
{
    return (T*)(((size_t)x + alignment - 1) / alignment * alignment);
}
#endif

//! Aligns a number or pointer to the next multiple of a provided alignment.
//! @note The alignment need not be a power-of-two.
//! @param x The pointer or value to align
//! @param alignment The alignment value in bytes.
//! @returns If \p x is already aligned to \p alignment, returns \p x; otherwise returns \p x rounded up to the next
//! multiple of \p alignment.
#define CARB_ALIGN(x, alignment) carb::align(x, alignment)

#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Rounds a size up to the next multiple of `alignment`; the backing for CARB_ALIGNED_SIZE.
template <typename T>
constexpr T alignedSize(const T& size, uint32_t alignment)
{
    return ((size + alignment - 1) / alignment) * alignment;
}
#endif

//! Aligns a size to the given alignment.
//! @note The alignment need not be a power-of-two.
//! @param size The size to align.
//! @param alignment The alignment value in bytes.
//! @returns If \p size is already aligned to \p alignment, returns \p size; otherwise returns \p size rounded up to the
//! next multiple of \p alignment.
#define CARB_ALIGNED_SIZE(size, alignment) carb::alignedSize(size, alignment)

//! Defined as `alignas(T)`.
#define CARB_ALIGN_AS(T) alignas(T)

#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Integer ceiling division; the backing for CARB_DIVIDE_CEIL.
template <typename T>
constexpr T divideCeil(T size, uint32_t divisor)
{
    static_assert(std::is_integral<T>::value, "Integral required.");
    return (size + divisor - 1) / divisor;
}
#endif

/**
 * Divides size by divisor and returns the closest integer greater than or equal to the division result.
 * For uses such as calculating a number of thread groups that cover all threads in a compute dispatch.
 * @param size An integer value.
 * @param divisor The divisor value.
 * @returns `size / divisor`, rounded up to the nearest whole integer. The type is based on \p size.
 */
#define CARB_DIVIDE_CEIL(size, divisor) carb::divideCeil(size, divisor)

#if (CARB_HAS_CPP17 && defined(__cpp_lib_hardware_interference_size)) || defined(DOXYGEN_BUILD)
//! Minimum offset between two objects to avoid false sharing, i.e. cache line size. If C++17 is not supported, falls
//! back to the default value of 64 bytes.
# define CARB_CACHELINE_SIZE (std::hardware_destructive_interference_size)
#else
# define CARB_CACHELINE_SIZE (64)
#endif
//! Defined as `CARB_ALIGN_AS(CARB_CACHELINE_SIZE)`.
#define CARB_CACHELINE_ALIGN CARB_ALIGN_AS(CARB_CACHELINE_SIZE)

/** This is a wrapper for the platform-specific call to the non-standard but almost universal alloca() function. */
#if CARB_PLATFORM_WINDOWS
# define CARB_ALLOCA(size) _alloca(size)
#elif CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS
# define CARB_ALLOCA(size) alloca(size)
#else
CARB_UNSUPPORTED_PLATFORM();
#endif

//! Attempts to allocate an array of the given type on the stack.
//! @warning On Windows, the underlying call to `_alloca()` may throw a SEH stack overflow exception if the stack does
//! not have sufficient space to perform the allocation. However, on Linux, there is no error handling for the
//! underlying `alloca()` call. The caller is advised to use caution.
//! @note The memory allocated is within the stack frame of the current function and is automatically freed when the
//! function returns or `longjmp()` or `siglongjmp()` is called. The memory is \a not freed when leaving the scope that
//! allocates it, except by the methods mentioned.
//! @param T The type of the object(s) to allocate.
//! @param number The number of objects to allocate. If `0`, a `nullptr` is returned.
//! @returns A properly-aligned pointer that will fit \p number quantity of type \p T on the stack. This memory will be
//! freed automatically when the function returns or `longjmp()` or `siglongjmp()` is called.
// Over-allocates by alignof(T) so the result can be rounded up to a correctly aligned address.
#define CARB_STACK_ALLOC(T, number)                                                                                    \
    carb::align<T>(((number) ? (T*)CARB_ALLOCA((number) * sizeof(T) + alignof(T)) : nullptr), alignof(T))

//! Allocates memory from the heap.
//! @rst
//! .. deprecated:: 126.0
//!     Please use `carb::allocate()` instead.
//! @endrst
//! @warning Memory allocated from this method must be freed within the same module that allocated it.
//! @param size The number of bytes to allocate.
//! @returns A valid pointer to a memory region of \p size bytes. If an error occurs, `nullptr` is returned.
#define CARB_MALLOC(size) std::malloc(size) //! Frees memory previously allocated using CARB_MALLOC(). //! @rst //! .. deprecated:: 126.0 //! Please use `carb::deallocate()` instead. //! @endrst //! @param ptr The pointer previously returned from \c CARB_MALLOC. #define CARB_FREE(ptr) std::free(ptr) #ifndef DOXYGEN_SHOULD_SKIP_THIS # define __CARB_STRINGIFY(x) # x #endif //! Turns a name into a string, resolving macros (i.e. `CARB_STRINGIFY(__LINE__)` on line 815 will produce `"815"`). //! @param x The name to turn into a string. //! @returns \p x as a string. #define CARB_STRINGIFY(x) __CARB_STRINGIFY(x) //! FNV-1a 64-bit hash basis. //! @see http://www.isthe.com/chongo/tech/comp/fnv/#FNV-param constexpr uint64_t kFnvBasis = 14695981039346656037ull; //! FNV-1a 64-bit hash prime. //! @see http://www.isthe.com/chongo/tech/comp/fnv/#FNV-param constexpr uint64_t kFnvPrime = 1099511628211ull; //! Compile-time FNV-1a 64-bit hash, use with CARB_HASH_STRING macro //! @param str The string to hash. //! @param n The number of characters in \p str, not including the NUL terminator. //! @param hash The previous hash value or starting hash basis. //! @returns A hash computed from the given parameters. constexpr uint64_t fnv1aHash(const char* str, std::size_t n, uint64_t hash = kFnvBasis) { return n > 0 ? fnv1aHash(str + 1, n - 1, (hash ^ *str) * kFnvPrime) : hash; } #ifndef DOXYGEN_SHOULD_SKIP_THIS //! Compile-time FNV-1a 64-bit hash for a static string (char array). //! @param array The static string to hash. //! @returns A hash computed from the given parameters. template <std::size_t N> constexpr uint64_t fnv1aHash(const char (&array)[N]) { return fnv1aHash(&array[0], N - 1); } #endif //! Runtime FNV-1a 64-bit string hash //! @param str The C-style (NUL terminated) string to hash. //! @param hash The previous hash value or starting hash basis. //! @returns A hash computed from the given parameters. 
inline uint64_t hashString(const char* str, uint64_t hash = kFnvBasis) { while (*str != '\0') { hash ^= static_cast<unsigned char>(*(str++)); hash *= kFnvPrime; } return hash; } //! A fast table-based implementation of std::tolower for ASCII characters only. //! @warning This function does not work on Unicode characters and is not locale-aware; it is ASCII only. //! @param c The character to change to lower case. //! @return The lower-case letter of \p c if \p c is an upper-case letter; \p c otherwise. constexpr unsigned char tolower(unsigned char c) { return detail::lowerTable[c]; }; //! A fast table-based implementation of std::toupper for ASCII characters only. //! @warning This function does not work on Unicode characters and is not locale-aware; it is ASCII only. //! @param c The character to change to upper case. //! @return The upper-case letter of \p c if \p c is a lower-case letter; \p c otherwise. constexpr unsigned char toupper(unsigned char c) { return detail::upperTable[c]; } //! Runtime FNV-1a 64-bit lower-case string hash (as if the string had been converted using \ref tolower()). //! @param str The C-style (NUL terminated) string to hash. //! @param hash The previous hash value or starting hash basis. //! @returns A hash computed from the given parameters. inline uint64_t hashLowercaseString(const char* str, uint64_t hash = kFnvBasis) { while (*str != '\0') { hash ^= tolower(static_cast<unsigned char>(*(str++))); hash *= kFnvPrime; } return hash; } //! Runtime FNV-1a 64-bit lower-case byte hash (as if the bytes had been converted using \ref tolower()). //! @param buffer The byte buffer to hash. //! @param len The number of bytes in \p buffer. //! @param hash The previous hash value or starting hash basis. //! @returns A hash computed from the given parameters. 
inline uint64_t hashLowercaseBuffer(const void* buffer, size_t len, uint64_t hash = kFnvBasis) { const unsigned char* data = static_cast<const unsigned char*>(buffer); const unsigned char* const end = data + len; while (data != end) { hash ^= tolower(*(data++)); hash *= kFnvPrime; } return hash; } //! Runtime FNV-1a 64-bit upper-case string hash (as if the string had been converted using \ref toupper()). //! @param str The C-style (NUL terminated) string to hash. //! @param hash The previous hash value or starting hash basis. //! @returns A hash computed from the given parameters. inline uint64_t hashUppercaseString(const char* str, uint64_t hash = kFnvBasis) { while (*str != '\0') { hash ^= toupper(static_cast<unsigned char>(*(str++))); hash *= kFnvPrime; } return hash; } //! Runtime FNV-1a 64-bit upper-case byte hash (as if the bytes had been converted using \ref toupper()). //! @param buffer The byte buffer to hash. //! @param len The number of bytes in \p buffer. //! @param hash The previous hash value or starting hash basis. //! @returns A hash computed from the given parameters. inline uint64_t hashUppercaseBuffer(const void* buffer, size_t len, uint64_t hash = kFnvBasis) { const unsigned char* data = static_cast<const unsigned char*>(buffer); const unsigned char* const end = data + len; while (data != end) { hash ^= toupper(*(data++)); hash *= kFnvPrime; } return hash; } //! Runtime FNV-1a 64-bit byte hash. //! @param buffer The byte buffer to hash. //! @param length The number of bytes in \p buffer. //! @param hash The previous hash value or starting hash basis. //! @returns A hash computed from the given parameters. inline uint64_t hashBuffer(const void* buffer, size_t length, uint64_t hash = kFnvBasis) { const char* ptr = static_cast<const char*>(buffer); for (size_t i = 0; i < length; ++i) { hash ^= static_cast<unsigned char>(ptr[i]); hash *= kFnvPrime; } return hash; } //! Runtime FNV-1a 64-bit hash of a scalar type. //! @param type An scalar to hash. 
//! @param hash The previous hash value or starting hash basis.
//! @returns A hash computed from the given parameters.
template <class T>
constexpr uint64_t hashScalar(const T& type, uint64_t hash = kFnvBasis)
{
    // Only scalar types have a stable byte representation suitable for raw-memory hashing.
    static_assert(std::is_scalar<T>::value, "Unsupported type for hashing");
    return hashBuffer(reinterpret_cast<const char*>(std::addressof(type)), sizeof(type), hash);
}

/**
 * Combines two hashes producing better collision avoidance than XOR.
 *
 * @param hash1 The initial hash
 * @param hash2 The hash to combine with @p hash1
 * @returns A combined hash of @p hash1 and @p hash2
 */
inline constexpr uint64_t hashCombine(uint64_t hash1, uint64_t hash2) noexcept
{
    // Multiply/shift/multiply mixing step before folding hash2 into hash1.
    constexpr uint64_t kConstant{ 14313749767032793493ull };
    constexpr int kRotate = 47;

    hash2 *= kConstant;
    hash2 ^= (hash2 >> kRotate);
    hash2 *= kConstant;

    hash1 ^= hash2;
    hash1 *= kConstant;

    // Add an arbitrary value to prevent 0 hashing to 0
    hash1 += 0x42524143; // CARB

    return hash1;
}

// The string hash macro is guaranteed to evaluate at compile time. MSVC raises a warning for this, which we disable.
#if defined(__CUDACC__) || defined(DOXYGEN_BUILD)
//! Computes a literal string hash at compile time.
//! @param str The string literal to hash
//! @returns A hash computed from the given string literal as if by \ref carb::fnv1aHash().
# define CARB_HASH_STRING(str) std::integral_constant<uint64_t, carb::fnv1aHash(str)>::value
#else
# define CARB_HASH_STRING(str)                                                                  \
    CARB_IGNOREWARNING_MSC_WITH_PUSH(4307) /* 'operator': integral constant overflow */         \
    std::integral_constant<uint64_t, carb::fnv1aHash(str)>::value CARB_IGNOREWARNING_MSC_POP
#endif

//! Syntactic sugar for `CARB_HASH_STRING(CARB_STRINGIFY(T))`.
#define CARB_HASH_TYPE(T) CARB_HASH_STRING(CARB_STRINGIFY(T))

// printf-like functions attributes
#if CARB_COMPILER_GNUC || defined(DOXYGEN_BUILD)
//! Requests that the compiler validate any variadic arguments as printf-style format specifiers, if supported by the
//! compiler. Causes a compilation error if the printf-style format specifier doesn't match the given variadic types.
//! @note The current implementation is effective only when `CARB_COMPILER_GNUC` is non-zero. The Windows implementation
//! does not work properly for custom printf-like function pointers. It is recommended where possible to use a "fake
//! printf" trick to force the compiler to evaluate the arguments:
//! ```cpp
//! if (0) printf(fmt, arg1, arg2); // Compiler will check but never execute.
//! ```
//! @param fmt_ordinal The 1-based function parameter receiving the printf-style format string.
//! @param args_ordinal The 1-based function parameter receiving the first variadic argument.
# define CARB_PRINTF_FUNCTION(fmt_ordinal, args_ordinal) CARB_ATTRIBUTE(format(printf, fmt_ordinal, args_ordinal))
#elif CARB_COMPILER_MSC
// Microsoft suggests to use SAL annotations _Printf_format_string_ and _Printf_format_string_params_ for
// printf-like functions. Unfortunately it does not work properly for custom printf-like function pointers.
// So, instead of defining marker attribute for format string, we use the "fake printf" trick to force compiler
// checks and keep function attribute empty.
# define CARB_PRINTF_FUNCTION(fmt_ordinal, args_ordinal)
#else
# define CARB_PRINTF_FUNCTION(fmt_ordinal, args_ordinal)
#endif

//! An empty class tag type used with \ref EmptyMemberPair constructors.
struct ValueInitFirst
{
    //! Default constructor.
    constexpr explicit ValueInitFirst() = default;
};

//! An empty class tag type used with \ref EmptyMemberPair constructors.
struct InitBoth
{
    //! Default constructor.
    constexpr explicit InitBoth() = default;
};

//! Attempts to invoke the Empty Member Optimization by inheriting from the First element if possible, which, if empty
//! will eliminate the storage necessary for an empty class; the Second element is always stored as a separate member.
//!
//! The First element is inherited from if it is an empty `class`/`struct` and is not declared `final`.
//! @tparam First The first element of the pair that the pair will inherit from if empty and not `final`.
//! @tparam Second The second element of the pair that will always be a member.
template <class First, class Second, bool = std::is_empty<First>::value && !std::is_final<First>::value>
class EmptyMemberPair : private First
{
public:
    //! Type of the First element
    using FirstType = First;
    //! Type of the Second element
    using SecondType = Second;

    //! Constructor that default-initializes the `First` member and passes all arguments to the constructor of `Second`.
    //! @param args arguments passed to the constructor of `second`.
    template <class... Args2>
    constexpr explicit EmptyMemberPair(ValueInitFirst, Args2&&... args)
        : First{}, second{ std::forward<Args2>(args)... }
    {
    }

    //! Constructor that initializes both members.
    //! @param arg1 the argument that is forwarded to the `First` constructor.
    //! @param args2 arguments passed to the constructor of `second`.
    template <class Arg1, class... Args2>
    constexpr explicit EmptyMemberPair(InitBoth, Arg1&& arg1, Args2&&... args2)
        : First(std::forward<Arg1>(arg1)), second(std::forward<Args2>(args2)...)
    {
    }

    //! Non-const access to `First`.
    //! @returns a non-const reference to `First`.
    constexpr FirstType& first() noexcept
    {
        // `First` is a private base, so the pair itself is the first element.
        return *this;
    }

    //! Const access to `First`.
    //! @returns a const reference to `First`.
    constexpr const FirstType& first() const noexcept
    {
        return *this;
    }

    //! Direct access to the `Second` member.
    SecondType second;
};

#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Fallback specialization when `First` cannot be inherited from (non-empty or `final`):
// both elements are stored as plain members.
template <class First, class Second>
class EmptyMemberPair<First, Second, false>
{
public:
    using FirstType = First;
    using SecondType = Second;

    template <class... Args2>
    constexpr explicit EmptyMemberPair(ValueInitFirst, Args2&&... args)
        : m_first(), second(std::forward<Args2>(args)...)
    {
    }

    template <class Arg1, class...
Args2>
    constexpr explicit EmptyMemberPair(InitBoth, Arg1&& arg1, Args2&&... args2)
        : m_first(std::forward<Arg1>(arg1)), second(std::forward<Args2>(args2)...)
    {
    }

    constexpr FirstType& first() noexcept
    {
        return m_first;
    }

    constexpr const FirstType& first() const noexcept
    {
        return m_first;
    }

private:
    FirstType m_first;

public:
    SecondType second;
};
#endif

} // namespace carb

/**
 * Picks the minimum of two values.
 *
 * Same as `std::min` but implemented without using the `min` keyword as Windows.h can sometimes `#define` it.
 *
 * @param left The first value to compare.
 * @param right The second value to compare.
 * @returns \p left if \p left is less than \p right, otherwise \p right, even if the values are equal.
 */
template <class T>
CARB_NODISCARD constexpr const T& carb_min(const T& left, const T& right) noexcept(noexcept(left < right))
{
    return left < right ? left : right;
}

/**
 * Picks the maximum of two values.
 *
 * Same as `std::max` but implemented without using the `max` keyword as Windows.h can sometimes `\#define` it.
 *
 * @param left The first value to compare.
 * @param right The second value to compare.
 * @returns \p right if \p left is less than \p right, otherwise \p left, even if the values are equal.
 */
template <class T>
CARB_NODISCARD constexpr const T& carb_max(const T& left, const T& right) noexcept(noexcept(left < right))
{
    return left < right ? right : left;
}

#if CARB_POSIX || defined(DOXYGEN_BUILD)
/**
 * A macro to retry operations if they return -1 and errno is set to EINTR.
 * @warning The `op` expression is potentially evaluated multiple times.
 * @param op The operation to retry
 * @returns The return value of \p op while guaranteeing that `errno` is not `EINTR`.
 */
# define CARB_RETRY_EINTR(op)                                                                   \
    [&] {                                                                                       \
        decltype(op) ret_;                                                                      \
        while ((ret_ = (op)) < 0 && errno == EINTR)                                             \
        {                                                                                       \
        }                                                                                       \
        return ret_;                                                                            \
    }()
#endif

/**
 * Portable way to mark unused variables as used.
 *
 * This tricks the compiler into thinking that the variables are used, eliminating warnings about unused variables.
 *
 * @param args Any variables or arguments that should be marked as unused.
 */
template <class... Args>
void CARB_UNUSED(Args&&... CARB_DOC_ONLY(args))
{
}

/** A macro to mark functionality that has not been implemented yet.
 * @remarks This will abort the process with a message.
 *     The macro is [[noreturn]].
 */
#define CARB_UNIMPLEMENTED(msg, ...)                                                            \
    do                                                                                          \
    {                                                                                           \
        CARB_FATAL_UNLESS(false, (msg), ##__VA_ARGS__);                                         \
        std::terminate();                                                                       \
    } while (0)

/** A macro to mark placeholder functions on MacOS while the porting effort is in progress. */
#define CARB_MACOS_UNIMPLEMENTED() CARB_UNIMPLEMENTED("Unimplemented on Mac OS")

#if defined(CARB_INCLUDE_PURIFY_NAME) && !defined(DOXYGEN_BUILD)
// Prefer __COUNTER__ for unique names when the compiler provides it; fall back to __LINE__.
# ifdef __COUNTER__
#  define CARB_INCLUDE_PURIFY_TEST(...)                                                         \
    inline void CARB_JOIN(CARB_INCLUDE_PURIFY_NAME, __COUNTER__)()                              \
    __VA_ARGS__ static_assert(true, "Semicolon required")
# else
#  define CARB_INCLUDE_PURIFY_TEST(...)                                                         \
    inline void CARB_JOIN(CARB_INCLUDE_PURIFY_NAME, __LINE__)()                                 \
    __VA_ARGS__ static_assert(true, "Semicolon required")
# endif
#else
/**
 * A macro that is used only for public includes to define a function which will instantiate templates.
 *
 * The templates are instantiated to make sure that all of the required symbols are available for compilation.
 * Example usage:
 * @code{.cpp}
 * CARB_INCLUDE_PURIFY_TEST({
 *     carb::Delegate<void()> del;
 * });
 * @endcode
 * @note The braces must be specified inside the macro parentheses.
 * @note This function is never executed, merely compiled to test include purification. Unit tests are responsible for
 * full testing.
 * @note This macro only produces a function if `CARB_INCLUDE_PURIFY_NAME` is set to a function name. This happens in
 * Carbonite's premake5.lua file when the include purification projects are generated.
 */
# define CARB_INCLUDE_PURIFY_TEST(...)
#endif
omniverse-code/kit/include/carb/StartupUtils.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Contains @ref carb::startupFramework() and @ref carb::shutdownFramework(). Consider using @ref
//! OMNI_CORE_INIT(), which invokes these methods for you in a safe manner.
#pragma once

#include "Framework.h"
#include "crashreporter/CrashReporterUtils.h"
#include "dictionary/DictionaryUtils.h"
#include "extras/AppConfig.h"
#include "extras/CmdLineParser.h"
#include "extras/EnvironmentVariableParser.h"
#include "extras/EnvironmentVariableUtils.h"
#include "extras/Path.h"
#include "extras/VariableSetup.h"
#include "filesystem/IFileSystem.h"
#include "l10n/L10nUtils.h"
#include "logging/Log.h"
#include "logging/LoggingSettingsUtils.h"
#include "logging/StandardLogger.h"
#include "profiler/Profile.h"
#include "settings/ISettings.h"
#include "tokens/ITokens.h"
#include "tokens/TokensUtils.h"

#include "../omni/structuredlog/StructuredLogSettingsUtils.h"

#include <array>
#include <map>
#include <string>
#include <vector>

namespace carb
{

//! Parameters passed to @ref carb::startupFramework().
struct StartupFrameworkDesc
{
    //! A string containing either one of two things:
    //!
    //! * A path to a configuration file.
    //!
    //! * A raw string containing the configuration (in either JSON or TOML format based on @p configFormat ).
    //!
    //! @ref carb::startupFramework() will first check to see if the string maps to an existing file, and if not, the
    //! string is treated as a raw configuration string.
    const char* configString; // Path to a config file or string with configuration data

    char** argv; //!< Array of command line arguments
    int argc; //!< Number of command line arguments

    //! An array of search paths for plugins.
    //!
    //! Relative search paths are relative to the executable's directory, not the current working directory.
    //!
    //! These search paths will be used when loading the base set of carbonite plugins (such as carb.settings.plugin),
    //! then this will be set as the default value for the @ref carb::settings::ISettings key `/pluginSearchPaths` (this
    //! allows the setting to be overridden if you set it in config.toml or pass it on the command line).
    //!
    //! Passing an empty array will result in the executable directory being used as the default search path.
    //!
    //! This option is needed when the base set of Carbonite plugins are not inside of the executable's directory;
    //! otherwise, `/pluginSearchPaths` could be set in config.toml or via the command line.
    //!
    //! Defaults to `nullptr`.
    const char* const* initialPluginsSearchPaths;
    size_t initialPluginsSearchPathCount; //!< Size of array of paths to search for plugins

    //! Prefix of command line arguments serving as overrides for configuration values. Default is `--/`.
    const char* cmdLineParamPrefix;

    //! Prefix of environment variables serving as overrides for configuration values. Default is `OMNI_APPNAME_`.
    const char* envVarsParamPrefix;

    const char* configFormat; //!< The selected config format ("toml", "json", etc). Default is "toml".

    const char* appNameOverride; //!< Override automatic app name search. Defaults to `nullptr`.
    const char* appPathOverride; //!< Override automatic app path search. Defaults to `nullptr`.

    bool disableCrashReporter; //!< If `true`, the crash reporter plugin will not be loaded. Defaults to `false`.

    //! Returns a @ref StartupFrameworkDesc with default values.
    static StartupFrameworkDesc getDefault()
    {
        static constexpr const char* kDefaultCmdLineParamPrefix = "--/";
        static constexpr const char* kDefaultEnvVarsParamPrefix = "OMNI_APPNAME_";
        static constexpr const char* kDefaultConfigFormat = "toml";

        // Value-initialize so all pointer members default to nullptr and flags to false.
        StartupFrameworkDesc result{};

        result.cmdLineParamPrefix = kDefaultCmdLineParamPrefix;
        result.envVarsParamPrefix = kDefaultEnvVarsParamPrefix;
        result.configFormat = kDefaultConfigFormat;

        return result;
    }
};

/**
 * Simple plugin loading function wrapper that loads plugins matching multiple patterns.
 *
 * Consider using @ref carb::startupFramework(), which calls this function with user defined paths via config files, the
 * environment, and the command line.
 *
 * @param pluginNamePatterns String that contains plugin names pattern - wildcards are supported.
 * @param pluginNamePatternCount Number of items in @p pluginNamePatterns.
 * @param searchPaths Array of paths to look for plugins in.
 * @param searchPathCount Number of paths in searchPaths array.
 */
inline void loadPluginsFromPatterns(const char* const* pluginNamePatterns,
                                    size_t pluginNamePatternCount,
                                    const char* const* searchPaths = nullptr,
                                    size_t searchPathCount = 0)
{
    Framework* f = getFramework();
    PluginLoadingDesc desc = PluginLoadingDesc::getDefault();
    desc.loadedFileWildcards = pluginNamePatterns;
    desc.loadedFileWildcardCount = pluginNamePatternCount;
    desc.searchPaths = searchPaths;
    desc.searchPathCount = searchPathCount;
    f->loadPlugins(desc);
}

/**
 * Simple plugin loading function wrapper that loads plugins matching a single pattern.
 *
 * Consider using @ref carb::startupFramework(), which calls this function with user defined paths via config files, the
 * environment, and the command line.
 *
 * @param pluginNamePattern String that contains a plugin pattern - wildcards are supported.
 * @param searchPaths Array of paths to look for plugins in.
 * @param searchPathCount Number of paths in searchPaths array.
 */
inline void loadPluginsFromPattern(const char* pluginNamePattern,
                                   const char* const* searchPaths = nullptr,
                                   size_t searchPathCount = 0)
{
    const char* plugins[] = { pluginNamePattern };
    loadPluginsFromPatterns(plugins, countOf(plugins), searchPaths, searchPathCount);
}

//! Internal
namespace detail
{

//! Loads plugins based on settings specified in the given @p settings object.
//!
//! The settings read populate a @ref carb::PluginLoadingDesc. The settings read are:
//!
//! @rst
//!
//! /pluginSearchPaths
//!   Array of paths in which to search for plugins.
//!
//! /pluginSearchRecursive
//!   If ``true``, recursively search each path in `/pluginSearchPaths`.
//!
//! /reloadablePlugins
//!   Array of plugin wildcards that mark plugins as reloadable.
//!
//! /pluginsLoaded
//!   Wildcard of plugins to load.
//!
//! /pluginsExcluded
//!   Wildcard of plugins that match `/pluginsLoaded` but should not be loaded.
//!
//! @endrst
//!
//! Do not use this function directly. Rather, call @ref carb::startupFramework().
inline void loadPluginsFromConfig(settings::ISettings* settings)
{
    if (settings == nullptr)
        return;

    Framework* f = getFramework();

    // Initialize the plugin loading description to default configuration,
    // and override parts of it to the config values, if present.
    PluginLoadingDesc loadingDesc = PluginLoadingDesc::getDefault();

    // Check if plugin search paths are present in the config, and override if present
    const char* kPluginSearchPathsKey = "/pluginSearchPaths";
    std::vector<const char*> pluginSearchPaths(settings->getArrayLength(kPluginSearchPathsKey));
    if (!pluginSearchPaths.empty())
    {
        settings->getStringBufferArray(kPluginSearchPathsKey, pluginSearchPaths.data(), pluginSearchPaths.size());
        loadingDesc.searchPaths = pluginSearchPaths.data();
        loadingDesc.searchPathCount = pluginSearchPaths.size();
    }

    const char* kPluginSearchRecursive = "/pluginSearchRecursive"; // Is search recursive?
    if (settings->isAccessibleAs(carb::dictionary::ItemType::eBool, kPluginSearchRecursive))
    {
        loadingDesc.searchRecursive = settings->getAsBool(kPluginSearchRecursive);
    }

    // Check/override reloadable plugins if present
    const char* kReloadablePluginsKey = "/reloadablePlugins";
    std::vector<const char*> reloadablePluginFiles(settings->getArrayLength(kReloadablePluginsKey));
    if (!reloadablePluginFiles.empty())
    {
        settings->getStringBufferArray(kReloadablePluginsKey, reloadablePluginFiles.data(), reloadablePluginFiles.size());
        loadingDesc.reloadableFileWildcards = reloadablePluginFiles.data();
        loadingDesc.reloadableFileWildcardCount = reloadablePluginFiles.size();
    }

    // Check/override plugins to load if present
    const char* kPluginsLoadedKey = "/pluginsLoaded";
    std::vector<const char*> pluginsLoaded;
    if (settings->getItemType(kPluginsLoadedKey) == dictionary::ItemType::eDictionary)
    {
        pluginsLoaded.resize(settings->getArrayLength(kPluginsLoadedKey));
        settings->getStringBufferArray(kPluginsLoadedKey, pluginsLoaded.data(), pluginsLoaded.size());
        loadingDesc.loadedFileWildcards = pluginsLoaded.size() ? pluginsLoaded.data() : nullptr;
        loadingDesc.loadedFileWildcardCount = pluginsLoaded.size();
    }

    const char* kPluginsExcludedKey = "/pluginsExcluded";
    std::vector<const char*> pluginsExcluded;
    if (settings->getItemType(kPluginsExcludedKey) == dictionary::ItemType::eDictionary)
    {
        pluginsExcluded.resize(settings->getArrayLength(kPluginsExcludedKey));
        settings->getStringBufferArray(kPluginsExcludedKey, pluginsExcluded.data(), pluginsExcluded.size());
        loadingDesc.excludedFileWildcards = pluginsExcluded.size() ? pluginsExcluded.data() : nullptr;
        loadingDesc.excludedFileWildcardCount = pluginsExcluded.size();
    }

    // Load plugins based on the resulting desc
    if (loadingDesc.loadedFileWildcardCount)
        f->loadPlugins(loadingDesc);
}

//! Sets @ref carb::Framework's "default" plugins from the given @p settings `/defaultPlugins` key.
//!
//! In short, this function calls @ref carb::Framework::setDefaultPlugin for each plugin name in `/defaultPlugins`.
//! However, since the interface type cannot be specified, plugins listed in `/defaultPlugins` will become the default
//! plugin for \a all interfaces they provide.
//!
//! This function assumes the plugins in `/defaultPlugins` have already been loaded.
//!
//! The following keys are used from @p settings:
//!
//! @rst
//! /defaultPlugins
//!   A list of plugin names. These plugins become the default plugins to use when acquiring their interfaces.
//! @endrst
//!
//! Do not use this function directly. Rather, call @ref carb::startupFramework().
inline void setDefaultPluginsFromConfig(settings::ISettings* settings)
{
    if (settings == nullptr)
        return;

    Framework* f = getFramework();

    // Default plugins
    const char* kDefaultPluginsKey = "/defaultPlugins";
    std::vector<const char*> defaultPlugins(settings->getArrayLength(kDefaultPluginsKey));
    if (!defaultPlugins.empty())
    {
        settings->getStringBufferArray(kDefaultPluginsKey, defaultPlugins.data(), defaultPlugins.size());
        for (const char* pluginName : defaultPlugins)
        {
            // Set plugin as default for all interfaces it provides
            const PluginDesc& pluginDesc = f->getPluginDesc(pluginName);
            for (size_t i = 0; i < pluginDesc.interfaceCount; i++)
            {
                f->setDefaultPluginEx(g_carbClientName, pluginDesc.interfaces[i], pluginName);
            }
        }
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
// If the dict item is a special raw string then it returns pointer to the buffer past the special raw string marker
// In all other cases it returns nullptr
inline const char* getRawStringFromItem(carb::dictionary::IDictionary* dictInterface,
                                        const carb::dictionary::Item* item)
{
    if (!dictInterface || !item)
    {
        return nullptr;
    }
    if (dictInterface->getItemType(item) != dictionary::ItemType::eString)
    {
        return nullptr;
    }

    const char* stringBuffer = dictInterface->getStringBuffer(item);
    if (!stringBuffer)
    {
        return nullptr;
    }

    constexpr char kSpecialRawStringMarker[] =
"$raw:"; constexpr size_t kMarkerLen = carb::countOf(kSpecialRawStringMarker) - 1; if (std::strncmp(stringBuffer, kSpecialRawStringMarker, kMarkerLen) != 0) { return nullptr; } return stringBuffer + kMarkerLen; } class LoadSettingsHelper { public: struct SupportedConfigInfo { const char* configFormatName; const char* serializerPluginName; const char* configExt; }; LoadSettingsHelper() { Framework* f = getFramework(); m_fs = f->acquireInterface<filesystem::IFileSystem>(); } struct LoadSettingsDesc { std::string appDir; // Application directory std::string appName; // Application name const char* configStringOrPath; // Configuration string that can be null, string containing configuration data // (in selected configFormat) or a path to a config file const extras::ConfigLoadHelper::CmdLineOptionsMap* cmdLineOptionsMap; // Mapping of the command line options const extras::ConfigLoadHelper::PathwiseEnvOverridesMap* pathwiseEnvOverridesMap; // Mapping of path-wise // environment variables that // will be mapped into // corresponding settings const extras::ConfigLoadHelper::EnvVariablesMap* envVariablesMap; // Mapping of common environment variables const char* const* pluginSearchPaths; // Array of directories used by the system to search for plugins size_t pluginSearchPathCount; // Number of elements in the pluginSearchPaths const char* cmdLineConfigPath; // Path to a file containing config override (in selected configFormat), can be // null const char* configFormat; // Selected configuration format that is supported by the system inline static LoadSettingsDesc getDefault() noexcept { LoadSettingsDesc result{}; Framework* f = getFramework(); filesystem::IFileSystem* fs = f->acquireInterface<filesystem::IFileSystem>(); extras::Path execPathStem(extras::getPathStem(fs->getExecutablePath())); // Initialize application path and name to the executable path and name result.appName = execPathStem.getFilename(); result.appDir = execPathStem.getParent(); result.configFormat = 
"toml"; return result; } inline void overwriteWithNonEmptyParams(const LoadSettingsDesc& other) noexcept { if (!other.appDir.empty()) { appDir = other.appDir; } if (!other.appName.empty()) { appName = other.appName; } if (other.configStringOrPath) { configStringOrPath = other.configStringOrPath; } if (other.cmdLineOptionsMap) { cmdLineOptionsMap = other.cmdLineOptionsMap; } if (other.pathwiseEnvOverridesMap) { pathwiseEnvOverridesMap = other.pathwiseEnvOverridesMap; } if (other.envVariablesMap) { envVariablesMap = other.envVariablesMap; } if (other.pluginSearchPaths) { pluginSearchPaths = other.pluginSearchPaths; pluginSearchPathCount = other.pluginSearchPathCount; } if (other.cmdLineConfigPath) { cmdLineConfigPath = other.cmdLineConfigPath; } if (other.configFormat) { configFormat = other.configFormat; } } }; void loadBaseSettingsPlugins(const char* const* pluginSearchPaths, size_t pluginSearchPathCount) { Framework* f = getFramework(); // clang-format off const char* plugins[] = { "carb.dictionary.plugin", "carb.settings.plugin", "carb.tokens.plugin", m_selectedConfigInfo ? 
m_selectedConfigInfo->serializerPluginName : "carb.dictionary.serializer-toml.plugin" }; // clang-format on loadPluginsFromPatterns(plugins, countOf(plugins), pluginSearchPaths, pluginSearchPathCount); m_idict = f->tryAcquireInterface<dictionary::IDictionary>(); if (m_idict == nullptr) { CARB_LOG_INFO("Couldn't acquire dictionary::IDictionary interface on startup to load the settings."); return; } m_settings = f->tryAcquireInterface<settings::ISettings>(); if (m_settings == nullptr) { CARB_LOG_INFO("Couldn't acquire settings::ISettings interface on startup to load the settings."); } } class ConfigStageLoader { public: static constexpr const char* kConfigSuffix = ".config"; static constexpr const char* kOverrideSuffix = ".override"; ConfigStageLoader(filesystem::IFileSystem* fs, dictionary::ISerializer* configSerializer, LoadSettingsHelper* helper, const SupportedConfigInfo* selectedConfigInfo, const extras::ConfigLoadHelper::EnvVariablesMap* envVariablesMap) : m_fs(fs), m_configSerializer(configSerializer), m_helper(helper), m_selectedConfigInfo(selectedConfigInfo), m_envVariablesMap(envVariablesMap) { m_possibleConfigPathsStorage.reserve(4); } dictionary::Item* loadAndMergeSharedUserSpaceConfig(const extras::Path& userFolder, dictionary::Item* combinedConfig, std::string* sharedUserSpaceFilepath) { if (!userFolder.isEmpty()) { m_possibleConfigPathsStorage.clear(); m_possibleConfigPathsStorage.emplace_back(userFolder / "omni" + kConfigSuffix + m_selectedConfigInfo->configExt); return tryLoadAnySettingsAndMergeIntoTarget(m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, sharedUserSpaceFilepath); } return combinedConfig; } dictionary::Item* loadAndMergeAppSpecificUserSpaceConfig(const extras::Path& userFolder, const std::string& appName, dictionary::Item* combinedConfig, std::string* appSpecificUserSpaceFilepath) { if (!userFolder.isEmpty()) { m_possibleConfigPathsStorage.clear(); m_possibleConfigPathsStorage.emplace_back(userFolder 
/ appName + kConfigSuffix + m_selectedConfigInfo->configExt); return tryLoadAnySettingsAndMergeIntoTarget(m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, appSpecificUserSpaceFilepath); } return combinedConfig; } dictionary::Item* loadAndMergeLocalSpaceConfig(const std::string& appDir, const std::string& appName, dictionary::Item* combinedConfig, std::string* localSpaceConfigFilepath) { const extras::Path cwd(m_fs->getCurrentDirectoryPath()); const extras::Path appDirPath(appDir); const extras::Path exePath(m_fs->getExecutableDirectoryPath()); const std::string appConfig = appName + kConfigSuffix + m_selectedConfigInfo->configExt; m_possibleConfigPathsStorage.clear(); m_possibleConfigPathsStorage.emplace_back(cwd / appConfig); if (!appDir.empty()) { m_possibleConfigPathsStorage.emplace_back(appDirPath / appConfig); } if (appDirPath != exePath) { m_possibleConfigPathsStorage.emplace_back(exePath / appConfig); } return tryLoadAnySettingsAndMergeIntoTarget(m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, localSpaceConfigFilepath); } dictionary::Item* loadAndMergeSharedUserSpaceConfigOverride(dictionary::Item* combinedConfig, const std::string& sharedUserSpaceFilepath) { if (!sharedUserSpaceFilepath.empty()) { m_possibleConfigPathsStorage.clear(); addPossiblePathOverridesForSearch( extras::getPathStem(sharedUserSpaceFilepath), m_selectedConfigInfo->configExt); return tryLoadAnySettingsAndMergeIntoTarget( m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr); } return combinedConfig; } dictionary::Item* loadAndMergeAppSpecificUserSpaceConfigOverride(dictionary::Item* combinedConfig, const std::string& appSpecificUserSpaceFilepath) { if (!appSpecificUserSpaceFilepath.empty()) { m_possibleConfigPathsStorage.clear(); addPossiblePathOverridesForSearch( extras::getPathStem(appSpecificUserSpaceFilepath), m_selectedConfigInfo->configExt); return 
tryLoadAnySettingsAndMergeIntoTarget( m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr); } return combinedConfig; } dictionary::Item* loadAndMergeLocalSpaceConfigOverride(dictionary::Item* combinedConfig, const std::string& localSpaceConfigFilepath) { if (!localSpaceConfigFilepath.empty()) { m_possibleConfigPathsStorage.clear(); addPossiblePathOverridesForSearch( extras::getPathStem(localSpaceConfigFilepath), m_selectedConfigInfo->configExt); return tryLoadAnySettingsAndMergeIntoTarget( m_configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr); } return combinedConfig; } dictionary::Item* loadAndMergeCustomConfig(dictionary::Item* combinedConfig, const char* filepath, dictionary::ISerializer* customSerializer = nullptr) { m_possibleConfigPathsStorage.clear(); m_possibleConfigPathsStorage.emplace_back(filepath); dictionary::ISerializer* configSerializer = customSerializer ? customSerializer : m_configSerializer; return tryLoadAnySettingsAndMergeIntoTarget( configSerializer, combinedConfig, m_possibleConfigPathsStorage, m_envVariablesMap, nullptr); } private: void addPossiblePathOverridesForSearch(const std::string& pathStem, const char* extension) { m_possibleConfigPathsStorage.emplace_back(pathStem + kOverrideSuffix + extension); m_possibleConfigPathsStorage.emplace_back(pathStem + extension + kOverrideSuffix); } dictionary::Item* tryLoadAnySettingsAndMergeIntoTarget(dictionary::ISerializer* configSerializer, dictionary::Item* targetDict, const std::vector<std::string>& possibleConfigPaths, const extras::ConfigLoadHelper::EnvVariablesMap* envVariablesMap, std::string* loadedDictPath) { if (loadedDictPath) { loadedDictPath->clear(); } dictionary::Item* loadedDict = nullptr; for (const auto& curConfigPath : possibleConfigPaths) { const char* dictFilename = curConfigPath.c_str(); if (!m_fs->exists(dictFilename)) { continue; } loadedDict = 
dictionary::createDictionaryFromFile(configSerializer, dictFilename); if (loadedDict) { if (loadedDictPath) { *loadedDictPath = dictFilename; } CARB_LOG_INFO("Found and loaded settings from: %s", dictFilename); break; } else { CARB_LOG_ERROR("Couldn't load the '%s' config data from file '%s'", m_selectedConfigInfo->configFormatName, dictFilename); break; } } dictionary::IDictionary* dictionaryInterface = m_helper->getDictionaryInterface(); return extras::ConfigLoadHelper::resolveAndMergeNewDictIntoTarget( dictionaryInterface, targetDict, loadedDict, loadedDictPath ? loadedDictPath->c_str() : nullptr, envVariablesMap); } std::vector<std::string> m_possibleConfigPathsStorage; filesystem::IFileSystem* m_fs = nullptr; dictionary::ISerializer* m_configSerializer = nullptr; LoadSettingsHelper* m_helper = nullptr; const SupportedConfigInfo* m_selectedConfigInfo = nullptr; const extras::ConfigLoadHelper::EnvVariablesMap* m_envVariablesMap = nullptr; }; inline dictionary::ISerializer* acquireOrLoadSerializerFromConfigInfo(const LoadSettingsDesc& params, const SupportedConfigInfo* configInfo) { dictionary::ISerializer* configSerializer = getFramework()->tryAcquireInterface<dictionary::ISerializer>(configInfo->serializerPluginName); if (configSerializer) return configSerializer; return loadConfigSerializerPlugin(params.pluginSearchPaths, params.pluginSearchPathCount, configInfo); } inline dictionary::Item* readConfigStages(const LoadSettingsDesc& params, std::string* localSpaceConfigFilepath, std::string* customConfigFilepath, std::string* cmdLineConfigFilepath) { if (!m_configSerializer) { return nullptr; } CARB_LOG_INFO("Using '%s' format for config files.", m_selectedConfigInfo->configFormatName); dictionary::Item* combinedConfig = nullptr; extras::Path userFolder = extras::ConfigLoadHelper::getConfigUserFolder(params.envVariablesMap); std::string sharedUserSpaceFilepath; std::string appSpecificUserSpaceFilepath; ConfigStageLoader configStageLoader(m_fs, 
m_configSerializer, this, m_selectedConfigInfo, params.envVariablesMap);

    // Base configs: each stage merges on top of the previous one, so later stages win.
    combinedConfig =
        configStageLoader.loadAndMergeSharedUserSpaceConfig(userFolder, combinedConfig, &sharedUserSpaceFilepath);
    combinedConfig = configStageLoader.loadAndMergeAppSpecificUserSpaceConfig(
        userFolder, params.appName, combinedConfig, &appSpecificUserSpaceFilepath);
    combinedConfig = configStageLoader.loadAndMergeLocalSpaceConfig(
        params.appDir, params.appName, combinedConfig, localSpaceConfigFilepath);

    // Overrides: the ".override" variants of each base config that was actually found.
    combinedConfig =
        configStageLoader.loadAndMergeSharedUserSpaceConfigOverride(combinedConfig, sharedUserSpaceFilepath);
    combinedConfig = configStageLoader.loadAndMergeAppSpecificUserSpaceConfigOverride(
        combinedConfig, appSpecificUserSpaceFilepath);
    combinedConfig = configStageLoader.loadAndMergeLocalSpaceConfigOverride(combinedConfig, *localSpaceConfigFilepath);

    tokens::ITokens* tokensInterface = carb::getFramework()->tryAcquireInterface<tokens::ITokens>();

    // Loading text configuration override.
    // params.configStringOrPath is dual-purpose: if (after token resolution) it names an
    // existing file, load that file with a serializer matching its extension; otherwise
    // treat the string itself as inline config data in the selected format.
    if (params.configStringOrPath)
    {
        std::string configPath;
        if (tokensInterface)
        {
            configPath = tokens::resolveString(tokensInterface, params.configStringOrPath);
        }
        else
        {
            configPath = params.configStringOrPath;
        }
        if (m_fs->exists(configPath.c_str()))
        {
            std::string configExt = extras::Path(configPath).getExtension();
            const SupportedConfigInfo* configInfo = getConfigInfoFromExtension(configExt.c_str());
            dictionary::ISerializer* customSerializer = acquireOrLoadSerializerFromConfigInfo(params, configInfo);
            if (customConfigFilepath)
                *customConfigFilepath = configPath;
            combinedConfig =
                configStageLoader.loadAndMergeCustomConfig(combinedConfig, configPath.c_str(), customSerializer);
        }
        else
        {
            dictionary::Item* textConfigurationOverride =
                m_configSerializer->createDictionaryFromStringBuffer(params.configStringOrPath);
            if (textConfigurationOverride)
            {
                CARB_LOG_INFO("Loaded text configuration override");
                combinedConfig = extras::ConfigLoadHelper::resolveAndMergeNewDictIntoTarget(
                    m_idict, combinedConfig, textConfigurationOverride, "text configuration override",
                    params.envVariablesMap);
            }
            else
            {
                CARB_LOG_ERROR("Couldn't process provided config string as a '%s' config file or config data",
                               m_selectedConfigInfo->configFormatName);
            }
        }
    }

    // Loading custom file configuration override ("--config-path"); unlike the stage
    // above, a non-existent path here is an error rather than inline config data.
    if (params.cmdLineConfigPath)
    {
        std::string configPath;
        if (tokensInterface)
        {
            configPath = tokens::resolveString(tokensInterface, params.cmdLineConfigPath);
        }
        else
        {
            configPath = params.cmdLineConfigPath;
        }
        if (m_fs->exists(configPath.c_str()))
        {
            std::string configExt = extras::Path(configPath).getExtension();
            const SupportedConfigInfo* configInfo = getConfigInfoFromExtension(configExt.c_str());
            dictionary::ISerializer* customSerializer = acquireOrLoadSerializerFromConfigInfo(params, configInfo);
            // NOTE(review): the out-param records the *unresolved* path (params value),
            // while loading uses the token-resolved one — confirm this asymmetry is intended.
            if (cmdLineConfigFilepath)
                *cmdLineConfigFilepath = params.cmdLineConfigPath;
            combinedConfig =
                configStageLoader.loadAndMergeCustomConfig(combinedConfig, configPath.c_str(), customSerializer);
        }
        else
        {
            CARB_LOG_ERROR("The config file '%s' provided via command line doesn't exist", params.cmdLineConfigPath);
        }
    }

    // Highest priority last: pathwise environment-variable overrides, then command-line
    // option overrides.
    combinedConfig = extras::ConfigLoadHelper::applyPathwiseEnvOverrides(
        m_idict, combinedConfig, params.pathwiseEnvOverridesMap, params.envVariablesMap);
    combinedConfig = extras::ConfigLoadHelper::applyCmdLineOverrides(
        m_idict, combinedConfig, params.cmdLineOptionsMap, params.envVariablesMap);

    return combinedConfig;
}

//! Returns the static table of supported config formats (format name, serializer plugin
//! name, file extension). Index 0 (toml) is the fallback default used by selectConfigType().
const auto& getSupportedConfigTypes()
{
    static const std::array<SupportedConfigInfo, 2> kSupportedConfigTypes = {
        { { "toml", "carb.dictionary.serializer-toml.plugin", ".toml" },
          { "json", "carb.dictionary.serializer-json.plugin", ".json" } }
    };
    return kSupportedConfigTypes;
}

//! Finds the config-format descriptor whose file extension matches @p configExtension
//! (case-insensitive); returns nullptr when no format matches.
const SupportedConfigInfo* getConfigInfoFromExtension(const char* configExtension)
{
    const std::string parmsConfigExt = configExtension;
    for (const auto& curConfigInfo : getSupportedConfigTypes())
    {
        const char* curConfigExtEnd = curConfigInfo.configExt +
std::strlen(curConfigInfo.configExt);
        // Case-insensitive comparison; the 4-iterator std::equal also rejects mismatched
        // lengths. NOTE(review): std::tolower on plain char is UB for negative values —
        // fine here because extensions/format names are ASCII.
        if (std::equal(curConfigInfo.configExt, curConfigExtEnd, parmsConfigExt.begin(), parmsConfigExt.end(),
                       [](char l, char r) { return std::tolower(l) == std::tolower(r); }))
        {
            return &curConfigInfo;
        }
    }
    return nullptr;
}

//! Finds the config-format descriptor whose format name matches @p configFormat
//! (case-insensitive); returns nullptr when no format matches.
const SupportedConfigInfo* getConfigInfoFromFormatName(const char* configFormat)
{
    const std::string parmsConfigFormat = configFormat;
    for (const auto& curConfigInfo : getSupportedConfigTypes())
    {
        const char* curConfigFormatEnd = curConfigInfo.configFormatName + std::strlen(curConfigInfo.configFormatName);
        if (std::equal(curConfigInfo.configFormatName, curConfigFormatEnd, parmsConfigFormat.begin(),
                       parmsConfigFormat.end(), [](char l, char r) { return std::tolower(l) == std::tolower(r); }))
        {
            return &curConfigInfo;
        }
    }
    return nullptr;
}

//! Selects the default config format by name; on an unknown name, logs an error and
//! falls back to the first supported format (toml).
void selectConfigType(const char* configFormat)
{
    m_selectedConfigInfo = getConfigInfoFromFormatName(configFormat);
    if (!m_selectedConfigInfo)
    {
        CARB_LOG_ERROR("Unsupported configuration format: %s. Falling back to %s", configFormat,
                       getSupportedConfigTypes()[0].configFormatName);
        m_selectedConfigInfo = &getSupportedConfigTypes()[0];
    }
}

// Acquires the ISerializer for @p configInfo, loading its plugin from the search paths
// first if it isn't already registered. Returns nullptr (after logging) on failure.
static dictionary::ISerializer* loadConfigSerializerPlugin(const char* const* pluginSearchPaths,
                                                           size_t pluginSearchPathCount,
                                                           const SupportedConfigInfo* configInfo)
{
    if (!configInfo)
    {
        return nullptr;
    }
    dictionary::ISerializer* configSerializer =
        getFramework()->tryAcquireInterface<dictionary::ISerializer>(configInfo->serializerPluginName);
    if (!configSerializer)
    {
        loadPluginsFromPattern(configInfo->serializerPluginName, pluginSearchPaths, pluginSearchPathCount);
        configSerializer =
            getFramework()->tryAcquireInterface<dictionary::ISerializer>(configInfo->serializerPluginName);
    }
    if (!configSerializer)
    {
        CARB_LOG_ERROR("Couldn't acquire ISerializer interface on startup for parsing '%s' settings.",
                       configInfo->configFormatName);
    }
    return configSerializer;
}

// Loads (or acquires) the serializer plugin for the currently selected config format and
// stores it in m_configSerializer.
void loadSelectedConfigSerializerPlugin(const char* const* pluginSearchPaths, size_t
pluginSearchPathCount)
{
    m_configSerializer = loadConfigSerializerPlugin(pluginSearchPaths, pluginSearchPathCount, m_selectedConfigInfo);
}

//! Walks the whole dictionary and rewrites every "raw string" item in place so that its
//! value is stored as a plain string.
void fixRawStrings(dictionary::Item* combinedConfig)
{
    // Fixing the special raw strings
    auto rawStringsFixer = [&](dictionary::Item* item, uint32_t elementData, void* userData) {
        CARB_UNUSED(elementData, userData);
        const char* rawString = getRawStringFromItem(m_idict, item);
        if (!rawString)
        {
            return 0; // not a raw-string item; leave untouched
        }
        // buffering the value to be implementation-safe: setString may invalidate the
        // pointer returned by getRawStringFromItem
        const std::string value(rawString);
        m_idict->setString(item, value.c_str());
        return 0;
    };
    const auto getChildByIndexMutable = [](dictionary::IDictionary* dict, dictionary::Item* item, size_t index) {
        return dict->getItemChildByIndexMutable(item, index);
    };
    dictionary::walkDictionary(m_idict, dictionary::WalkerMode::eIncludeRoot, combinedConfig, 0, rawStringsFixer,
                               nullptr, getChildByIndexMutable);
}

// Accessors for the interfaces acquired during loadBaseSettingsPlugins(); may return
// nullptr if acquisition failed.
dictionary::IDictionary* getDictionaryInterface() const
{
    return m_idict;
}
dictionary::ISerializer* getConfigSerializerInterface() const
{
    return m_configSerializer;
}
settings::ISettings* getSettingsInterface() const
{
    return m_settings;
}

//! Creates a new empty dictionary item named @p name; logs an error and returns nullptr
//! on failure.
dictionary::Item* createEmptyDict(const char* name = "<config>")
{
    dictionary::Item* item = m_idict->createItem(nullptr, name, dictionary::ItemType::eDictionary);
    if (!item)
    {
        CARB_LOG_ERROR("Couldn't create empty configuration");
    }
    return item;
};

private:
filesystem::IFileSystem* m_fs = nullptr;
dictionary::IDictionary* m_idict = nullptr;
dictionary::ISerializer* m_configSerializer = nullptr;
settings::ISettings* m_settings = nullptr;
const SupportedConfigInfo* m_selectedConfigInfo = nullptr;
};

/**
 * Helper function to initialize the settings and tokens plugins from different configuration sources
 */
inline void loadSettings(const LoadSettingsHelper::LoadSettingsDesc& settingsDesc)
{
    Framework* f = getFramework();

    // Preparing settings parameters: start from defaults, then overlay every non-empty
    // field of the caller-provided descriptor.
    LoadSettingsHelper::LoadSettingsDesc params = LoadSettingsHelper::LoadSettingsDesc::getDefault();
params.overwriteWithNonEmptyParams(settingsDesc);

    LoadSettingsHelper loadSettingsHelper;
    loadSettingsHelper.selectConfigType(params.configFormat);
    loadSettingsHelper.loadBaseSettingsPlugins(params.pluginSearchPaths, params.pluginSearchPathCount);

    filesystem::IFileSystem* fs = f->acquireInterface<filesystem::IFileSystem>();
    tokens::ITokens* tokensInterface = f->tryAcquireInterface<tokens::ITokens>();

    // Initializing tokens: expose the executable's directory and filename as
    // ${exe-path} / ${exe-filename} so config files can reference them.
    if (tokensInterface)
    {
        const char* kExePathToken = "exe-path";
        const char* kExeFilenameToken = "exe-filename";
        carb::extras::Path exeFullPath = fs->getExecutablePath();
        tokensInterface->setInitialValue(kExePathToken, exeFullPath.getParent().getStringBuffer());
        tokensInterface->setInitialValue(kExeFilenameToken, exeFullPath.getFilename().getStringBuffer());
    }

    settings::ISettings* settings = loadSettingsHelper.getSettingsInterface();
    std::string localSpaceConfigFilepath;
    std::string customConfigFilepath;
    std::string cmdLineConfigFilepath;
    if (settings)
    {
        loadSettingsHelper.loadSelectedConfigSerializerPlugin(params.pluginSearchPaths, params.pluginSearchPathCount);
        dictionary::Item* combinedConfig = nullptr;
        combinedConfig = loadSettingsHelper.readConfigStages(
            params, &localSpaceConfigFilepath, &customConfigFilepath, &cmdLineConfigFilepath);

        // Fall back to an empty dictionary so settings initialization can still proceed
        // when no configuration source produced anything.
        if (!combinedConfig)
        {
            dictionary::IDictionary* dictionaryInterface = loadSettingsHelper.getDictionaryInterface();
            CARB_LOG_INFO("Using empty configuration for settings as no other sources created it.");
            combinedConfig = dictionaryInterface->createItem(nullptr, "<settings>", dictionary::ItemType::eDictionary);
        }
        if (!combinedConfig)
        {
            CARB_LOG_ERROR("Couldn't initialize settings because no configuration were created.");
        }
        else
        {
            loadSettingsHelper.fixRawStrings(combinedConfig);
            // Making the settings from the result dictionary
            settings->initializeFromDictionary(combinedConfig);
        }
    }
    else
    {
        CARB_LOG_INFO("Couldn't acquire ISettings interface on startup to load settings.");
    }

    // Initializing tokens for the config paths that were discovered above; unset paths
    // fall back to the current directory / the ${local-config-path} token respectively.
    if (tokensInterface)
    {
        const char* kLocalSpaceConfigPathToken = "local-config-path";
        const char* kLocalSpaceConfigPathTokenStr = "${local-config-path}";
        const char* kCustomConfigPathToken = "custom-config-path";
        const char* kCmdLineConfigPathToken = "cli-config-path";

        if (!localSpaceConfigFilepath.empty())
        {
            tokensInterface->setInitialValue(kLocalSpaceConfigPathToken, localSpaceConfigFilepath.c_str());
        }
        else
        {
            tokensInterface->setInitialValue(kLocalSpaceConfigPathToken, fs->getCurrentDirectoryPath());
        }

        if (!customConfigFilepath.empty())
        {
            tokensInterface->setInitialValue(kCustomConfigPathToken, customConfigFilepath.c_str());
        }
        else
        {
            tokensInterface->setInitialValue(kCustomConfigPathToken, kLocalSpaceConfigPathTokenStr);
        }

        if (!cmdLineConfigFilepath.empty())
        {
            tokensInterface->setInitialValue(kCmdLineConfigPathToken, cmdLineConfigFilepath.c_str());
        }
        else
        {
            tokensInterface->setInitialValue(kCmdLineConfigPathToken, kLocalSpaceConfigPathTokenStr);
        }
    }
    else
    {
        CARB_LOG_INFO("Couldn't acquire tokens interface and initialize default tokens.");
    }
}

#endif // DOXYGEN_SHOULD_SKIP_THIS
} // namespace detail

//! Loads the framework configuration based on a slew of input parameters.
//!
//! First see @ref carb::StartupFrameworkDesc for an idea of the type of data this function accepts.
//!
//! At a high-level this function:
//!
//! - Determines application path from CLI args and env vars (see @ref carb::extras::getAppPathAndName()).
//! - Sets application path as filesystem root
//! - Loads plugins for settings: *carb.settings.plugin*, *carb.dictionary.plugin*, *carb.tokens.plugins* and any
//!   serializer plugin.
//! - Searches for config file, loads it and applies CLI args overrides.
//!
//! Rather than this function, consider using @ref OMNI_CORE_INIT(), which handles both starting and shutting down the
//! framework for you in your application.
inline void loadFrameworkConfiguration(const StartupFrameworkDesc& params) { Framework* f = getFramework(); const StartupFrameworkDesc& defaultStartupFrameworkDesc = StartupFrameworkDesc::getDefault(); const char* cmdLineParamPrefix = params.cmdLineParamPrefix; if (!cmdLineParamPrefix) { cmdLineParamPrefix = defaultStartupFrameworkDesc.cmdLineParamPrefix; } const char* envVarsParamPrefix = params.envVarsParamPrefix; if (!envVarsParamPrefix) { envVarsParamPrefix = defaultStartupFrameworkDesc.envVarsParamPrefix; } const char* configFormat = params.configFormat; if (!configFormat) { configFormat = defaultStartupFrameworkDesc.configFormat; } char** const argv = params.argv; const int argc = params.argc; extras::CmdLineParser cmdLineParser(cmdLineParamPrefix); cmdLineParser.parse(argv, argc); const extras::CmdLineParser::Options& args = cmdLineParser.getOptions(); const char* cmdLineConfigPath = nullptr; bool verboseConfiguration = false; int32_t startLogLevel = logging::getLogging()->getLevelThreshold(); if (argv && argc > 0) { auto findOptionIndex = [=](const char* option) { for (int i = 0; i < argc; ++i) { const char* curArg = argv[i]; if (curArg && !strcmp(curArg, option)) { return i; } } return -1; }; auto findOptionValue = [=](const char* option) -> const char* { const int optionIndex = findOptionIndex(option); if (optionIndex == -1) { return nullptr; } if (optionIndex >= argc - 1) { CARB_LOG_ERROR("Argument not present for the '%s' option", option); } return argv[optionIndex + 1]; }; // Parsing verbose configuration option const char* const kVerboseConfigKey = "--verbose-config"; verboseConfiguration = findOptionIndex(kVerboseConfigKey) != -1; if (verboseConfiguration) { logging::getLogging()->setLevelThreshold(logging::kLevelVerbose); } // Parsing cmd line for "--config-path" argument const char* const kConfigPathKey = "--config-path"; cmdLineConfigPath = findOptionValue(kConfigPathKey); if (cmdLineConfigPath) { CARB_LOG_INFO("Using '%s' as the value for '%s'", 
cmdLineConfigPath, kConfigPathKey); } // Parsing config format from the command line const char* kConfigFormatKey = "--config-format"; const char* const configFormatValue = findOptionValue(kConfigFormatKey); if (configFormatValue) { configFormat = configFormatValue; } } carb::extras::EnvironmentVariableParser envVarsParser(envVarsParamPrefix); envVarsParser.parse(); filesystem::IFileSystem* fs = f->acquireInterface<filesystem::IFileSystem>(); // Prepare application path and name, which will be used to initialize the IFileSystem default root folder, // and also as one of the variants of configuration file name and location. std::string appPath, appName; extras::getAppPathAndName(args, appPath, appName); // If explicitly specified - override this search logic. That means an application doesn't give a control over // app path and/or app name through settings and env vars. if (params.appNameOverride) appName = params.appNameOverride; if (params.appPathOverride) appPath = params.appPathOverride; CARB_LOG_INFO("App path: %s, name: %s", appPath.c_str(), appName.c_str()); // set the application path for the process. This will be one of the locations we search for // the config file by default. fs->setAppDirectoryPath(appPath.c_str()); // Loading settings from config and command line. 
{ detail::LoadSettingsHelper::LoadSettingsDesc loadSettingsParams = detail::LoadSettingsHelper::LoadSettingsDesc::getDefault(); loadSettingsParams.appDir = appPath; loadSettingsParams.appName = appName; loadSettingsParams.configStringOrPath = params.configString; loadSettingsParams.cmdLineOptionsMap = &args; loadSettingsParams.pathwiseEnvOverridesMap = &envVarsParser.getOptions(); loadSettingsParams.envVariablesMap = &envVarsParser.getEnvVariables(); loadSettingsParams.pluginSearchPaths = params.initialPluginsSearchPaths; loadSettingsParams.pluginSearchPathCount = params.initialPluginsSearchPathCount; loadSettingsParams.cmdLineConfigPath = cmdLineConfigPath; loadSettingsParams.configFormat = configFormat; detail::loadSettings(loadSettingsParams); } // restoring the starting log level if (verboseConfiguration) { logging::getLogging()->setLevelThreshold(startLogLevel); } } //! Configures the framework given a slew of input parameters. //! //! First see @ref carb::StartupFrameworkDesc for an idea of the type of data this function accepts. //! //! At a high-level this function: //! //! - Configures logging with config file //! - Loads plugins according to config file with (see \ref detail::loadPluginsFromConfig()) //! - Configures default plugins according to config file (see \ref detail::setDefaultPluginsFromConfig()) //! - Starts the default profiler (if loaded) //! //! Rather than this function, consider using @ref OMNI_CORE_INIT(), which handles both starting and shutting down the //! framework for you in your application. 
inline void configureFramework(const StartupFrameworkDesc& params) { Framework* f = getFramework(); if (!params.disableCrashReporter) { // Startup the crash reporter loadPluginsFromPattern( "carb.crashreporter-*", params.initialPluginsSearchPaths, params.initialPluginsSearchPathCount); crashreporter::registerCrashReporterForClient(); } auto settings = f->tryAcquireInterface<carb::settings::ISettings>(); // Configure logging plugin and its default logger logging::configureLogging(settings); logging::configureDefaultLogger(settings); omni::structuredlog::configureStructuredLogging(settings); // Uploading leftover dumps asynchronously if (settings != nullptr) { if (!params.disableCrashReporter) { const char* const kStarupDumpsUploadKey = "/app/uploadDumpsOnStartup"; settings->setDefaultBool(kStarupDumpsUploadKey, true); if (settings->getAsBool(kStarupDumpsUploadKey)) { crashreporter::sendAndRemoveLeftOverDumpsAsync(); } } // specify the plugin search paths in settings so that loadPluginsFromConfig() // will have the search paths to look through const char* kPluginSearchPathsKey = "/pluginSearchPaths"; // only set this if nothing else has been manually set settings->setDefaultStringArray( kPluginSearchPathsKey, params.initialPluginsSearchPaths, params.initialPluginsSearchPathCount); } // Load plugins using supplied configuration detail::loadPluginsFromConfig(settings); // Configure default plugins as present in the config detail::setDefaultPluginsFromConfig(settings); #if !CARB_PLATFORM_MACOS // CC-669: avoid registering this on Mac OS since it's unimplemented // Starting up profiling // This way of registering profiler allows to enable/disable profiling in the config file, by // allowing/denying to load profiler plugin. carb::profiler::registerProfilerForClient(); CARB_PROFILE_STARTUP(); #endif carb::l10n::registerLocalizationForClient(); } //! Starts/Configures the framework given a slew of input parameters. //! //! 
First see @ref carb::StartupFrameworkDesc for an idea of the type of data this function accepts. //! //! At a high-level this function: //! //! - Calls \ref loadFrameworkConfiguration(), which: //! - Determines application path from CLI args and env vars (see @ref carb::extras::getAppPathAndName()). //! - Sets application path as filesystem root //! - Loads plugins for settings: *carb.settings.plugin*, *carb.dictionary.plugin*, *carb.tokens.plugins* and any //! serializer plugin. //! - Searches for config file, loads it and applies CLI args overrides. //! - Calls \ref configureFramework(), which: //! - Configures logging with config file //! - Loads plugins according to config file //! - Configures default plugins according to config file //! - Starts the default profiler (if loaded) //! //! Rather than this function, consider using @ref OMNI_CORE_INIT(), which handles both starting and shutting down the //! framework for you in your application. inline void startupFramework(const StartupFrameworkDesc& params) { loadFrameworkConfiguration(params); configureFramework(params); } //! Tears down the Carbonite framework. //! //! At a high level, this function: //! - Shuts down the profiler system (if running) //! - Calls \ref profiler::deregisterProfilerForClient(), \ref crashreporter::deregisterCrashReporterForClient(), and //! l10n::deregisterLocalizationForClient(). //! //! \note It is not necessary to manually call this function if \ref OMNI_CORE_INIT is used, since that macro will //! ensure that the Framework is released and shut down. inline void shutdownFramework() { CARB_PROFILE_SHUTDOWN(); profiler::deregisterProfilerForClient(); crashreporter::deregisterCrashReporterForClient(); carb::l10n::deregisterLocalizationForClient(); } } // namespace carb
omniverse-code/kit/include/carb/Types.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Common types used through-out Carbonite. #pragma once #include "Interface.h" #include "Strong.h" #include "../omni/core/OmniAttr.h" #include <cstddef> #include <cstdint> namespace omni { namespace core { OMNI_DECLARE_INTERFACE(ITypeFactory) // forward declaration for entry inPluginFrameworkDesc } namespace log { class ILog; // forward declaration for entry in PluginFrameworkDesc } namespace structuredlog { class IStructuredLog; } } // namespace omni //! The main Carbonite namespace. namespace carb { //! Defines the plugin hot reloading (auto reload) behavior. //! //! @rst //! .. deprecated:: 132.0 //! Hot reloading support has been removed. No replacement will be provided. //! @endrst enum class PluginHotReload { eDisabled, eEnabled }; /** * Defines a descriptor for the plugin implementation, to be provided to the macro CARB_PLUGIN_IMPL. */ struct PluginImplDesc { const char* name; //!< Name of the plugin (e.g. "carb.dictionary.plugin"). Must be globally unique. const char* description; //!< Helpful text describing the plugin. Use for debugging/tools. const char* author; //!< Author (e.g. "NVIDIA"). //! If hot reloading is supported by the plugin. //! @rst //! .. deprecated:: 132.0 //! Hot reloading support has been removed. No replacement will be provided. //! @endrst PluginHotReload hotReload; const char* build; //!< Build version of the plugin. }; CARB_ASSERT_INTEROP_SAFE(PluginImplDesc); //! Defines a struct to be filled by a plugin to provide the framework with all information about it. //! 
//! @note This struct has been superseded by PluginRegistryEntry2 but exists for historical and backwards-compatibility.
//! In the past, this struct was filled by the macro CARB_PLUGIN_IMPL.
struct PluginRegistryEntry
{
    PluginImplDesc implDesc; //!< Textual information about the plugin (name, desc, etc).

    //! Entry in an array of interfaces implemented by the plugin.
    struct Interface
    {
        InterfaceDesc desc; //!< An interface in the plugin.
        const void* ptr; //!< Pointer to the interface's `struct`.
        size_t size; //!< Size of the interface's `struct`.
    };

    Interface* interfaces; //!< Pointer to an array of interfaces implemented by the plugin.
    size_t interfaceCount; //!< Number of interfaces in the @p interfaces array.
};
// ABI-stability checks: these structs cross the plugin boundary.
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry);
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry::Interface);

//! Defines a struct to be filled by a plugin to provide the framework with all information about it.
//! This struct is automatically created and filled by the macro CARB_PLUGIN_IMPL.
struct PluginRegistryEntry2
{
    size_t sizeofThisStruct; //!< Must reflect `sizeof(PluginRegistryEntry2)`; used as a version for this struct.
    PluginImplDesc implDesc; //!< Textual information about the plugin (name, desc, etc).

    //! Entry in an array of interfaces implemented by the plugin.
    struct Interface2
    {
        size_t sizeofThisStruct; //!< Must reflect `sizeof(Interface2)`; used as a version for this struct.

        InterfaceDesc desc; //!< An interface in the plugin.

        size_t size; //!< Required size for the interface (must be the maximum size for all supported versions)
        size_t align; //!< Required alignment for the interface

        //! Constructor function for this interface within the plugin (auto-generated by \ref CARB_PLUGIN_IMPL).
        //!
        //! Called by the framework to construct the interface.
        //! @param p The buffer (guaranteed to be at least `size` bytes) to construct the interface into.
        void(CARB_ABI* Constructor)(void* p);

        // Which union member is active is determined by whether VersionedConstructor
        // (below) is null — there is no separate discriminator field.
        union
        {
            //! Destructor function for this interface within the plugin (auto-generated by \ref CARB_PLUGIN_IMPL).
            //!
            //! This union member is selected if `VersionedConstructor` is `nullptr`.
            //!
            //! Called by the framework to destruct the interface before unloading the plugin.
            //! @param p The buffer previously passed to \ref Constructor that contains the interface.
            void(CARB_ABI* Destructor)(void* p);

            //! Versioned destructor for this interface within the plugin.
            //!
            //! This union member is selected if `VersionedConstructor` is not `nullptr`.
            //!
            //! This function is typically the user-provided function \ref destroyInterface; if that function is not
            //! provided no destruction happens.
            //! @param v The version of the interface, as set in the `v` parameter for \ref VersionedConstructor before
            //! that function returns.
            //! @param p The interface buffer that was originally passed to \ref VersionedConstructor.
            void(CARB_ABI* VersionedDestructor)(Version v, void* p); //!< Destructor with version
        };

        //! Versioned constructor function for this interface within the plugin.
        //!
        //! This function is typically \ref fillInterface(carb::Version*, void*).
        //! @warning This function must not fail when `desc.version` is requested.
        //!
        //! @param v When called, the version requested. Before returning, the function should write the version that is
        //! being constructed into \p p.
        //! @param p A buffer (guaranteed to be at least `size` bytes) to construct the interface into.
        //! @retval `true` if the requested version was available and constructed into \p p.
        //! @retval `false` if the requested version is not available.
        bool(CARB_ABI* VersionedConstructor)(Version* v, void* p);

        // Internal note: This struct can be modified via the same rules for PluginRegistryEntry2 below.
    };

    Interface2* interfaces; //!< Pointer to an array of interfaces implemented by the plugin.
    size_t interfaceCount; //!< Number of interfaces in the @p interfaces array.

    // Internal note: This struct can be modified without changing the carbonite framework version, provided that new
    // members are only added to the end of the struct and existing members are not modified. The version can then be
    // determined by the sizeofThisStruct member. However, if it is modified, please add a new
    // carb.frameworktest.*.plugin (see ex2initial in premake5.lua for an example).
};
// ABI-stability checks: these structs cross the plugin boundary.
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry2);
CARB_ASSERT_INTEROP_SAFE(PluginRegistryEntry2::Interface2);

/**
 * Defines a struct which contains all key information about a plugin loaded into memory.
 */
struct PluginDesc
{
    PluginImplDesc impl; //!< Name, description, etc.

    const InterfaceDesc* interfaces; //!< Array of interfaces implemented by the plugin.
    size_t interfaceCount; //!< Number of interfaces implemented by the plugin.

    const InterfaceDesc* dependencies; //!< Array of interfaces on which the plugin depends.
    size_t dependencyCount; //!< Number of interfaces on which the plugin depends.

    const char* libPath; //!< File from which the plugin was loaded.
};
// ABI-stability check: this struct crosses the plugin boundary.
CARB_ASSERT_INTEROP_SAFE(PluginDesc);

//! Lets clients of a plugin know both just before and just after that the plugin is being reloaded.
enum class PluginReloadState
{
    eBefore, //!< The plugin is about to be reloaded.
    eAfter //!< The plugin has been reloaded.
};

//! Pass to each plugin's @ref OnPluginRegisterExFn during load. Allows the plugin to grab global Carbonite state such
//! as the @ref carb::Framework singleton.
struct PluginFrameworkDesc
{
    struct Framework* framework; //!< Owning carb::Framework.  Never `nullptr`.
    omni::core::ITypeFactory* omniTypeFactory; //!< omni::core::ITypeFactory singleton.  May be `nullptr`.
    omni::log::ILog* omniLog; //!< omni::log::ILog singleton.  May be `nullptr`.
    omni::structuredlog::IStructuredLog* omniStructuredLog; //!< omni::structuredlog::IStructuredLog singleton.  May be
                                                            //!< `nullptr`.

    //! Reserved space for future fields.
//! If a new field is added, subtract 1 from this array.
    //!
    //! The fields above must never be removed though newer implementations of carb.dll may decide to populate them with
    //! nullptr.
    //!
    //! When a newer plugin is loaded by an older carb.dll, these fields will be nullptr.  It is up to the newer plugin
    //! (really CARB_PLUGIN_IMPL_WITH_INIT()) to handle this.
    void* Reserved[28];
};
// Keeps the struct exactly 32 pointers wide so old and new plugins agree on its layout.
static_assert(sizeof(PluginFrameworkDesc) == (sizeof(void*) * 32),
              "sizeof(PluginFrameworkDesc) is unexpected. did you add a new field improperly?"); // contact ncournia for
                                                                                                // questions

/**
 * Defines a shared object handle.
 */
struct CARB_ALIGN_AS(8) SharedHandle
{
    // Overlapping representations of one platform-specific handle; which member is valid
    // depends on the platform/API that produced it.
    union
    {
        void* handlePointer; ///< A user-defined pointer.
        void* handleWin32; ///< A Windows/NT HANDLE. Defined as void* instead of "HANDLE" to avoid requiring windows.h.
        int handleFd; ///< A file descriptor (FD), POSIX handle.
    };
};

//! @defgroup CarbonitePluginExports Functions exported from Carbonite plugins. Use @ref CARB_PLUGIN_IMPL to have
//! reasonable default implementations of these function implemented for you in your plugin.

//! Required. Returns the plugin's required @ref carb::Framework version.
//
//! Use @ref CARB_PLUGIN_IMPL to have this function generated for your plugin.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbGetFrameworkVersion
typedef Version(CARB_ABI* GetFrameworkVersionFn)();

//! Either this or OnPluginRegisterExFn or OnPluginRegisterEx2Fn are required. Populates the given @ref
//! carb::PluginRegistryEntry with the plugin's information.
//!
//! Prefer using @ref OnPluginRegisterExFn instead of this function.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginRegister
typedef void(CARB_ABI* OnPluginRegisterFn)(Framework* framework, PluginRegistryEntry* outEntry);

//! Either this or OnPluginRegisterFn or OnPluginRegisterEx2 are required. Populates the given @ref
//! carb::PluginRegistryEntry with the plugin's information.
//!
//! Use @ref CARB_PLUGIN_IMPL to have this function generated for your plugin.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginRegisterEx
typedef void(CARB_ABI* OnPluginRegisterExFn)(PluginFrameworkDesc* framework, PluginRegistryEntry* outEntry);

//! Either this or OnPluginRegisterEx2Fn or OnPluginRegisterFn are required. Populates the given
//! carb::PluginRegistryEntry2 with the plugin's information.
//!
//! Use @ref CARB_PLUGIN_IMPL to have this function generated for your plugin.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginRegisterEx2
typedef void(CARB_ABI* OnPluginRegisterEx2Fn)(PluginFrameworkDesc* framework, PluginRegistryEntry2* outEntry);

//! Optional. Called after @ref OnPluginRegisterExFn.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginPreStartup
typedef void(CARB_ABI* OnPluginPreStartupFn)();

//! Optional. Called after @ref OnPluginPreStartupFn.
//!
//! Prefer using @ref OnPluginStartupExFn instead of this function since @ref OnPluginStartupExFn return a value that
//! will cause the plugin be unloaded.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginStartup
typedef void(CARB_ABI* OnPluginStartupFn)();

//! Optional. Called after @ref OnPluginPreStartupFn.
//!
//! This is the main user defined function for running startup code in your plugin.
//!
//! @returns Returns `true` if the startup was successful.  If `false` is returned, the plugin will be immediately
//! unloaded (only @ref OnPluginPostShutdownFn is called).
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginStartupEx
typedef bool(CARB_ABI* OnPluginStartupExFn)();

//! Optional. Called after @ref OnPluginStartupExFn.
//!
//! Called when the @ref carb::Framework is unloading the plugin. If the framework is released with
//! carb::quickReleaseFrameworkAndTerminate() and OnPluginQuickShutdownFn is available for plugin, this function is not
//! called.
//!
//! This is the main user defined function for running shutdown code in your plugin.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginShutdown
typedef void(CARB_ABI* OnPluginShutdownFn)();

//! Optional. Called if provided in lieu of OnPluginShutdownFn when the @ref carb::quickReleaseFrameworkAndTerminate()
//! is performing a quick shutdown.
//!
//! This function should save any state necessary, and close and flush any I/O, returning as quickly as possible. This
//! function is not called if the plugin is unloaded normally or through carb::releaseFramework().
//!
//! @note If carb::quickReleaseFrameworkAndTerminate() is called, OnPluginQuickShutdownFn is called if it is available.
//! If the function does not exist, OnPluginShutdownFn is called instead. OnPluginPostShutdownFn is always called.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginQuickShutdown
typedef void(CARB_ABI* OnPluginQuickShutdownFn)();

//! Optional. Called after @ref OnPluginShutdownFn.
//!
//! Called when the @ref carb::Framework is unloading the plugin.
//!
//! Most users will not have a need to define this function, as it is defined by default via @ref CARB_PLUGIN_IMPL.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnPluginPostShutdown
typedef void(CARB_ABI* OnPluginPostShutdownFn)();

//! Optional. Returns a static list of interfaces this plugin depends upon.
//!
//! Use @ref CARB_PLUGIN_IMPL_DEPS to have this function generated for you.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbGetPluginDeps
typedef void(CARB_ABI* GetPluginDepsFn)(InterfaceDesc** interfaceDesc, size_t* count);

//! Optional.
//!
//! @ingroup CarbonitePluginExports
//!
//! @see carbOnReloadDependency
typedef void(CARB_ABI* OnReloadDependencyFn)(PluginReloadState reloadState, void* pluginInterface, PluginImplDesc desc);

// Plain-old-data math/color types below; field-per-component layout is the ABI.

//! Two component `float` vector.
struct Float2
{
    float x; //!< x-component
    float y; //!< y-component
};

//! Three component `float` vector.
struct Float3
{
    float x; //!< x-component
    float y; //!< y-component
    float z; //!< z-component
};

//! Four component `float` vector.
struct Float4
{
    float x; //!< x-component
    float y; //!< y-component
    float z; //!< z-component
    float w; //!< w-component
};

//! Two component `double` vector.
struct Double2
{
    double x; //!< x-component
    double y; //!< y-component
};

//! Three component `double` vector.
struct Double3
{
    double x; //!< x-component
    double y; //!< y-component
    double z; //!< z-component
};

//! Four component `double` vector.
struct Double4
{
    double x; //!< x-component
    double y; //!< y-component
    double z; //!< z-component
    double w; //!< w-component
};

//! RGBA color with templated data type.
template <typename T>
struct Color
{
    T r; //!< Red
    T g; //!< Green
    T b; //!< Blue
    T a; //!< Alpha (transparency)
};

//! RGB `float` color.
struct ColorRgb
{
    float r; //!< Red
    float g; //!< Green
    float b; //!< Blue
};

//! RGBA `float` color.
struct ColorRgba
{
    float r; //!< Red
    float g; //!< Green
    float b; //!< Blue
    float a; //!< Alpha (transparency)
};

//! RGB `double` color.
struct ColorRgbDouble
{
    double r; //!< Red
    double g; //!< Green
    double b; //!< Blue
};

//! RGBA `double` color.
struct ColorRgbaDouble
{
    double r; //!< Red
    double g; //!< Green
    double b; //!< Blue
    double a; //!< Alpha (transparency)
};

//! Two component `int32_t` vector.
struct Int2
{
    int32_t x; //!< x-component
    int32_t y; //!< y-component
};

//! Three component `int32_t` vector.
struct Int3 { int32_t x; //!< x-component int32_t y; //!< y-component int32_t z; //!< z-component }; //! Four component `int32_t` vector. struct Int4 { int32_t x; //!< x-component int32_t y; //!< y-component int32_t z; //!< z-component int32_t w; //!< w-component }; //! Two component `uint32_t` vector. struct Uint2 { uint32_t x; //!< x-component uint32_t y; //!< y-component }; //! Three component `uint32_t` vector. struct Uint3 { uint32_t x; //!< x-component uint32_t y; //!< y-component uint32_t z; //!< z-component }; //! Four component `uint32_t` vector. struct Uint4 { uint32_t x; //!< x-component uint32_t y; //!< y-component uint32_t z; //!< z-component uint32_t w; //!< w-component }; //! A representation that can combine four character codes into a single 32-bit value for quick comparison. //! @see CARB_MAKE_FOURCC using FourCC = uint32_t; //! A macro for producing a carb::FourCC value from four characters. #define CARB_MAKE_FOURCC(a, b, c, d) \ ((FourCC)(uint8_t)(a) | ((FourCC)(uint8_t)(b) << 8) | ((FourCC)(uint8_t)(c) << 16) | ((FourCC)(uint8_t)(d) << 24)) /** * Timeout constant */ constexpr uint32_t kTimeoutInfinite = CARB_UINT32_MAX; //! A handle type for \ref Framework::addLoadHook() and \ref Framework::removeLoadHook() CARB_STRONGTYPE(LoadHookHandle, size_t); //! A value indicating an invalid load hook handle. constexpr LoadHookHandle kInvalidLoadHook{}; //! An enum that describes a binding registration for \ref carb::Framework::registerScriptBinding(). enum class BindingType : uint32_t { Owner, //!< The given client owns a script language; any interfaces acquired within the script language will be //!< considered as dependencies of the script language. Binding, //!< The given client is a binding for the given script language. Any interfaces acquired by the binding //!< will be considered as dependencies of all owners of the script language. }; } // namespace carb // these types used to be in this file but didn't really belong. 
we continue to include these type in this file for // backward-compat. #include "RenderingTypes.h"
omniverse-code/kit/include/carb/RString.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Registered String utility. See carb::RString for more info.
#pragma once

#include "Defines.h"

#define RSTRINGENUM_FROM_RSTRING_H
#include "RStringEnum.inl"
#undef RSTRINGENUM_FROM_RSTRING_H

#include <memory> // for std::owner_before
#include <ostream> // for std::basic_ostream
#include <stdint.h>
#include <string>
#include <typeindex> // for std::hash

namespace carb
{

//! Operations for RString (and variant classes) constructor.
enum class RStringOp
{
    //! Attempt to find a matching registered string, or register a new string if not found.
    eRegister,

    //! Only attempt to find a matching registered string. If the string cannot be found, the RString will be empty and
    //! will return `true` to RString::isEmpty().
    eFindExisting,
};

//! Internal definition detail.
namespace detail
{

//! @private
struct RStringBase
{
    //! @private
    CARB_VIZ uint32_t m_stringId : 31;
    //! @private
    unsigned m_uncased : 1;
};

//! @private
// NOTE: In order to satisfy the StandardLayoutType named requirement (required for ABI safety), all non-static data
// members and bit-fields must be declared in the same class. As such, this class must match RStringBase, but cannot
// inherit from RStringBase.
struct RStringKeyBase
{
    //! @private
    CARB_VIZ uint32_t m_stringId : 31;
    //! @private
    unsigned m_uncased : 1;
    //! @private
    CARB_VIZ int32_t m_number;
};

// Validate assumptions
CARB_ASSERT_INTEROP_SAFE(RStringBase);
CARB_ASSERT_INTEROP_SAFE(RStringKeyBase);
static_assert(offsetof(RStringKeyBase, m_number) == sizeof(RStringBase), "Offset error");

/**
 * The base class for all registered string classes: RString, RStringU, RStringKey, and RStringUKey.
 *
 * @tparam Uncased `true` if representing an "un-cased" (i.e. case-insensitive) registered string; `false` otherwise.
 */
template <bool Uncased, class Base = RStringBase>
class RStringTraits : protected Base
{
public:
    /**
     * Constant that indicates whether this is "un-cased" (i.e. case-insensitive).
     */
    static constexpr bool IsUncased = Uncased;

    //! @private
    constexpr RStringTraits() noexcept;

    //! @private
    constexpr RStringTraits(eRString staticString) noexcept;

    //! @private
    RStringTraits(const char* str, RStringOp op);

    //! @private
    RStringTraits(const char* str, size_t len, RStringOp op);

    //! @private
    RStringTraits(const std::string& str, RStringOp op);

    //! @private
    RStringTraits(uint32_t stringId) noexcept;

    /**
     * Checks to see if this registered string has been corrupted.
     *
     * @note It is not possible for this registered string to become corrupted through normal use of the API. It could
     * be caused by bad casts or use-after-free.
     *
     * @returns `true` if `*this` represents a valid registered string; `false` if `*this` is corrupted.
     */
    bool isValid() const noexcept;

    /**
     * Checks to see if this registered string represents the "" (empty) value.
     *
     * @returns `true` if `*this` is default-initialized or initialized to eRString::Empty; `false` otherwise.
     */
    constexpr bool isEmpty() const noexcept;

    /**
     * Checks to see if this registered string represents an "un-cased" (i.e. case-insensitive) registered string.
     *
     * @returns `true` if `*this` is "un-cased" (i.e. case-insensitive); `false` if case-sensitive.
     */
    constexpr bool isUncased() const noexcept;

    /**
     * Returns the registered string ID. This ID is only useful for debugging purposes and should not be used for
     * comparisons.
     *
     * @returns The string ID for this registered string.
     */
    constexpr uint32_t getStringId() const noexcept;

    /**
     * Returns the hash value as computed by `carb::hashString(this->c_str())`.
     *
     * @note This value is computed once for a registered string and cached, so this operation is generally very fast.
     *
     * @returns The hash value as computed by `carb::hashString(this->c_str())`.
     */
    size_t getHash() const;

    /**
     * Returns the hash value as computed by `carb::hashLowercaseString(this->c_str())`.
     *
     * @note This value is pre-computed for registered strings and cached, so this operation is always O(1).
     *
     * @returns The hash value as computed by `carb::hashLowercaseString(this->c_str())`.
     */
    size_t getUncasedHash() const noexcept;

    /**
     * Resolves this registered string to a C-style NUL-terminated string.
     *
     * @note This operation is O(1).
     *
     * @returns The C-style string previously registered.
     */
    const char* c_str() const noexcept;

    /**
     * An alias for c_str(); resolves this registered string to a C-style NUL-terminated string.
     *
     * @note This operation is O(1).
     *
     * @returns The C-style string previously registered.
     */
    const char* data() const noexcept;

    /**
     * Returns the length of the registered string. If the string contains embedded NUL ('\0') characters this may
     * differ from `std::strlen(c_str())`.
     *
     * @note This operation is O(1).
     *
     * @returns The length of the registered string not including the NUL terminator.
     */
    size_t length() const noexcept;

#ifndef DOXYGEN_BUILD
    /**
     * Resolves this registered string to a `std::string`.
     *
     * @returns A `std::string` containing a copy of the previously registered string.
     */
    std::string toString() const;
#endif

    /**
     * Equality comparison between this registered string and another.
     *
     * @param other Another registered string.
     * @returns `true` if `*this` and `other` represent the same registered string; `false` otherwise.
     */
    bool operator==(const RStringTraits& other) const;

    /**
     * Inequality comparison between this registered string and another.
     *
     * @param other Another registered string.
     * @returns `false` if `*this` and `other` represent the same registered string; `true` otherwise.
     */
    bool operator!=(const RStringTraits& other) const;

    /**
     * Checks whether this registered string is stably (but not lexicographically) ordered before another registered
     * string.
     *
     * This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
     *
     * @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
     * between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
     * While a lexicographical comparison would be O(n), this comparison is O(1).
     *
     * @param other Another registered string.
     * @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
     */
    bool owner_before(const RStringTraits& other) const;

    /**
     * Lexicographically compares this registered string with another.
     *
     * @note If either `*this` or @p other is "un-cased" (i.e. case-insensitive), a case-insensitive compare is
     * performed.
     *
     * @tparam OtherUncased `true` if @p other is "un-cased" (i.e. case-insensitive); `false` otherwise.
     * @param other Another registered string to compare against.
     * @returns `0` if the strings are equal, `>0` if @p other is lexicographically ordered before `*this`, or `<0` if
     * `*this` is lexicographically ordered before @p other. See note above regarding case-sensitivity.
     */
    template <bool OtherUncased, class OtherBase>
    int compare(const RStringTraits<OtherUncased, OtherBase>& other) const;

    /**
     * Lexicographically compares this registered string with a C-style string.
     *
     * @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
     *
     * @param s A C-style string to compare against.
     * @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before `*this`, or `<0` if
     * `*this` is lexicographically ordered before @p s. See note above regarding case-sensitivity.
     */
    int compare(const char* s) const;

    /**
     * Lexicographically compares a substring of this registered string with a C-style string.
     *
     * @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
     *
     * @param pos The starting offset of the registered string represented by `*this`. Must be less-than-or-equal-to the
     * length of the registered string.
     * @param count The length from @p pos to use in the comparison. This value is automatically clamped to the end of
     * the registered string.
     * @param s A C-style string to compare against.
     * @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before the substring of `*this`,
     * or `<0` if the substring of `*this` is lexicographically ordered before @p s. See note above regarding
     * case-sensitivity.
     */
    int compare(size_t pos, size_t count, const char* s) const;

    /**
     * Lexicographically compares a substring of this registered string with a C-style string.
     *
     * @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
     *
     * @param pos The starting offset of the registered string represented by `*this`. Must be less-than-or-equal-to the
     * length of the registered string.
     * @param count The length from @p pos to use in the comparison. This value is automatically clamped to the end of
     * the registered string.
     * @param s A C-style string to compare against.
     * @param len The number of characters of @p s to compare against.
     * @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before the substring of `*this`,
     * or `<0` if the substring of `*this` is lexicographically ordered before @p s. See note above regarding
     * case-sensitivity.
     */
    int compare(size_t pos, size_t count, const char* s, size_t len) const;

    /**
     * Lexicographically compares this registered string with a string.
     *
     * @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
     *
     * @param s A string to compare against.
     * @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before `*this`, or `<0` if
     * `*this` is lexicographically ordered before @p s. See note above regarding case-sensitivity.
     */
    int compare(const std::string& s) const;

    /**
     * Lexicographically compares a substring of this registered string with a string.
     *
     * @note If `*this` is "un-cased" (i.e. case-insensitive), a case-insensitive compare is performed.
     *
     * @param pos The starting offset of the registered string represented by `*this`. Must be less-than-or-equal-to the
     * length of the registered string.
     * @param count The length from @p pos to use in the comparison. This value is automatically clamped to the end of
     * the registered string.
     * @param s A string to compare against.
     * @returns `0` if the strings are equal, `>0` if @p s is lexicographically ordered before the substring of `*this`,
     * or `<0` if the substring of `*this` is lexicographically ordered before @p s. See note above regarding
     * case-sensitivity.
     */
    int compare(size_t pos, size_t count, const std::string& s) const;
};

} // namespace detail

class RString;
class RStringU;
class RStringKey;
class RStringUKey;

/**
 * Carbonite registered strings.
 *
 * The Carbonite framework has a rich <a href="https://en.wikipedia.org/wiki/String_interning">string-interning</a>
 * interface that is very easily used through the RString (and other) classes. This implements a <a
 * href="https://en.wikipedia.org/wiki/Flyweight_pattern">Flyweight pattern</a> for strings. The registered string
 * interface is fully @rstref{ABI-safe <abi-compatibility>} due to versioning, and can even be used in an application
 * prior to the `main()`, `WinMain()` or `DllMain()` functions being called. Furthermore, the API is fully thread-safe.
 *
 * Registered strings have pre-computed hashes which make them ideal for identifiers and map keys, and string
 * (in-)equality checks are O(1) constant time. For ordered containers, registered strings have an `owner_before()`
 * function that can be used for stable (though not lexicographical) ordering. If lexicographical ordering is desired,
 * O(n) `compare()` functions are provided.
 *
 * Variations exist around case-sensitivity. The RStringU class (the U stands for "un-cased" which is used in this API
 * to denote case-insensitivity) is used to register a string that will compare in a case-insensitive manner. Although
 * RString and RStringU cannot be directly compared for equality, RString::toUncased() exists to explicitly create a
 * case-insensitive RStringU from an RString which can then be compared.
 *
 * Variations also exist around using registered strings as a key value. It can be useful to have an associated number
 * to denote multiple instances of a registered string: hence the RStringKey and RStringUKey classes.
 *
 * To register a string, pass a string to the RString constructor RAII-style. Strings that are registered stay as such
 * for the entire run of the application; strings are never unregistered. Registered strings are stored in a named
 * section of shared memory accessible by all modules loaded by an application. The memory for registered strings is
 * allocated directly from the operating system to avoid cross-DLL heap issues.
 *
 * @note Registered strings are a limited resource, but there exists slots for approximately two million strings.
 *
 * Variations:
 * * RStringU - an "un-cased" (i.e. case-insensitive) registered string
 * * RStringKey - Adds a numeric component to RString to create an identifier or key.
 * * RStringUKey - Adds a numeric component to RStringU to create an identifier or key that is case-insensitive.
 */
class CARB_VIZ RString final : public detail::RStringTraits<false>
{
    using Base = detail::RStringTraits<false>;

public:
    /**
     * Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `false`).
     */
    using Base::IsUncased;

    /**
     * Default constructor. isEmpty() will report `true`.
     */
    constexpr RString() noexcept;

    /**
     * Initializes this registered string to one of the static pre-defined registered strings.
     * @param staticString The pre-defined registered string to use.
     */
    constexpr RString(eRString staticString) noexcept;

    /**
     * Finds or registers a new string.
     * @param str The string to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RString(const char* str, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new counted string.
     * @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
     * characters.
     * @param str The string to find or register.
     * @param len The number of characters of @p str to include.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RString(const char* str, size_t len, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new `std::string`.
     * @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
     * well.
     * @param str The `std::string` to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RString(const std::string& str, RStringOp op = RStringOp::eRegister);

    /**
     * Truncates RStringKey into only the registered string portion.
     * @param other The RStringKey to truncate.
     */
    explicit RString(const RStringKey& other) noexcept;

    /**
     * Converts this registered string into an "un-cased" (i.e. case-insensitive) registered string.
     *
     * @note The returned string may differ in case to `*this` when retrieved with c_str() or toString().
     *
     * @returns An "un-cased" (i.e. case-insensitive) string that matches `*this` when compared in a case-insensitive
     * manner.
     */
    RStringU toUncased() const noexcept;

    /**
     * Returns a copy of this registered string.
     * @note This function exists for compatibility with the RStringKey interface.
     * @returns `*this` since this string already has no number component.
     */
    RString truncate() const noexcept;

    /**
     * Appends a number to the registered string to form a RStringKey.
     *
     * @param number An optional number to append (default = `0`).
     * @returns An RStringKey based on `*this` and the provided number.
     */
    RStringKey toRStringKey(int32_t number = 0) const;

    /**
     * Equality comparison between this registered string and another.
     *
     * @param other Another registered string.
     * @returns `true` if `*this` and `other` represent the same registered string; `false` otherwise.
     */
    bool operator==(const RString& other) const noexcept;

    /**
     * Inequality comparison between this registered string and another.
     *
     * @param other Another registered string.
     * @returns `false` if `*this` and `other` represent the same registered string; `true` otherwise.
     */
    bool operator!=(const RString& other) const noexcept;

    /**
     * Checks whether this registered string is stably (but not lexicographically) ordered before another registered
     * string.
     *
     * This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
     *
     * @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
     * between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
     * While a lexicographical comparison would be O(n), this comparison is O(1).
     *
     * @param other Another registered string.
     * @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
     */
    bool owner_before(const RString& other) const noexcept;
};

/**
 * Case-insensitive registered string.
 *
 * The "U" stands for "un-cased".
 *
 * See RString for system-level information. This class differs from RString in that it performs case-insensitive
 * operations.
 *
 * Since the desire is for equality comparisons to be speed-of-light (i.e. O(1) numeric comparisons), the first string
 * registered insensitive to casing is chosen as an "un-cased authority" and if any strings registered through RStringU
 * later match that string (in a case-insensitive manner), that authority string will be chosen instead. This also means
 * that when RStringU is used to register a string and then that string is retrieved with RStringU::c_str(), the casing
 * in the returned string might not match what was registered.
 */
class CARB_VIZ RStringU final : public detail::RStringTraits<true>
{
    using Base = detail::RStringTraits<true>;

public:
    /**
     * Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `true`).
     */
    using Base::IsUncased;

    /**
     * Default constructor. isEmpty() will report `true`.
     */
    constexpr RStringU() noexcept;

    /**
     * Initializes this registered string to one of the static pre-defined registered strings.
     * @param staticString The pre-defined registered string to use.
     */
    constexpr RStringU(eRString staticString) noexcept;

    /**
     * Finds or registers a new case-insensitive string.
     *
     * @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
     *
     * @param str The string to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringU(const char* str, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new counted case-insensitive string.
     * @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
     * characters.
     * @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
     * @param str The string to find or register.
     * @param len The number of characters of @p str to include.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringU(const char* str, size_t len, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new case-insensitive `std::string`.
     * @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
     * well.
     * @note The casing of the string actually used may be different than @p str when reported by c_str() or toString().
     * @param str The `std::string` to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringU(const std::string& str, RStringOp op = RStringOp::eRegister);

    /**
     * Converts a registered string into an "un-cased" (i.e. case-insensitive) registered string.
     * @param other The RString to convert.
     */
    explicit RStringU(const RString& other);

    /**
     * Truncates RStringUKey into only the registered string portion.
     * @param other The RStringUKey to truncate.
     */
    explicit RStringU(const RStringUKey& other);

    /**
     * Returns a copy of this registered string.
     * @note This function exists for compatibility with the RString interface.
     * @returns `*this` since this string is already "un-cased" (i.e. case-insensitive).
     */
    RStringU toUncased() const noexcept;

    /**
     * Returns a copy of this registered string.
     * @note This function exists for compatibility with the RStringKey interface.
     * @returns `*this` since this string already has no number component.
     */
    RStringU truncate() const noexcept;

    /**
     * Appends a number to the registered string to form a RStringUKey.
     *
     * @param number An optional number to append (default = `0`).
     * @returns An RStringUKey based on `*this` and the provided number.
     */
    RStringUKey toRStringKey(int32_t number = 0) const;

    /**
     * Equality comparison between this registered string and another.
     *
     * @note A case-insensitive compare is performed.
     *
     * @param other Another registered string.
     * @returns `true` if `*this` and `other` represent the same registered string; `false` otherwise.
     */
    bool operator==(const RStringU& other) const noexcept;

    /**
     * Inequality comparison between this registered string and another.
     *
     * @note A case-insensitive compare is performed.
     *
     * @param other Another registered string.
     * @returns `false` if `*this` and `other` represent the same registered string; `true` otherwise.
     */
    bool operator!=(const RStringU& other) const noexcept;

    /**
     * Checks whether this registered string is stably (but not lexicographically) ordered before another registered
     * string.
     *
     * This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
     *
     * @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
     * between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
     * While a lexicographical comparison would be O(n), this comparison is O(1).
     *
     * @param other Another registered string.
     * @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
     */
    bool owner_before(const RStringU& other) const noexcept;
};

/**
 * A registered string key.
 *
 * See \ref RString for high-level information about the registered string system.
 *
 * RStringKey is formed by appending a numeric component to a registered string. This numeric component can be used as a
 * unique instance identifier alongside the registered string. Additionally, the RStringKey::toString() function will
 * append a non-zero numeric component following an underscore.
 */
class CARB_VIZ RStringKey final : public detail::RStringTraits<false, detail::RStringKeyBase>
{
    using Base = detail::RStringTraits<false, detail::RStringKeyBase>;

public:
    /**
     * Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `false`).
     */
    using Base::IsUncased;

    /**
     * Default constructor. isEmpty() will report `true` and getNumber() will return `0`.
     */
    constexpr RStringKey() noexcept;

    /**
     * Initializes this registered string to one of the static pre-defined registered strings.
     * @param staticString The pre-defined registered string to use.
     * @param number The number that will be returned by getNumber().
     */
    constexpr RStringKey(eRString staticString, int32_t number = 0) noexcept;

    /**
     * Finds or registers a new string.
     * @param str The string to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringKey(const char* str, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new string with a given number component.
     * @param number The number that will be returned by getNumber().
     * @param str The string to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    RStringKey(int32_t number, const char* str, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new counted string.
     * @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
     * characters.
     * @param str The string to find or register.
     * @param len The number of characters of @p str to include.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringKey(const char* str, size_t len, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new counted string with a given number component.
     * @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0')
     * characters.
     * @param number The number that will be returned by getNumber().
     * @param str The string to find or register.
     * @param len The number of characters of @p str to include.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringKey(int32_t number, const char* str, size_t len, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new `std::string`.
     * @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
     * well.
     * @param str The `std::string` to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringKey(const std::string& str, RStringOp op = RStringOp::eRegister);

    /**
     * Finds or registers a new `std::string` with a number component.
     * @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as
     * well.
     * @param number The number that will be returned by getNumber().
     * @param str The `std::string` to find or register.
     * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been
     * previously registered, `*this` is initialized as if with the default constructor.
     */
    explicit RStringKey(int32_t number, const std::string& str, RStringOp op = RStringOp::eRegister);

    /**
     * Appends a number component to a registered string to form a key.
     * @param str The registered string to decorate.
     * @param number The number that will be returned by getNumber().
     */
    RStringKey(const RString& str, int32_t number = 0);

    /**
     * Converts this registered string key into an "un-cased" (i.e. case-insensitive) registered string key.
     *
     * @note The returned string may differ in case to `*this` when retrieved with c_str() or toString().
     *
     * @returns An "un-cased" (i.e. case-insensitive) string that matches `*this` when compared in a case-insensitive
     * manner. The returned registered string key will have the same number component as `*this`.
     */
    RStringUKey toUncased() const noexcept;

    /**
     * Returns a registered string without the number component.
     * @returns A registered string that matches `*this` without a number component.
     */
    RString truncate() const noexcept;

    /**
     * Equality comparison between this registered string key and another.
     *
     * @param other Another registered string.
     * @returns `true` if `*this` and `other` represent the same registered string and have matching number components;
     * `false` otherwise.
     */
    bool operator==(const RStringKey& other) const noexcept;

    /**
     * Inequality comparison between this registered string key and another.
     *
     * @param other Another registered string.
     * @returns `false` if `*this` and `other` represent the same registered string and have matching number components;
     * `true` otherwise.
     */
    bool operator!=(const RStringKey& other) const noexcept;

    /**
     * Checks whether this registered string key is stably (but not lexicographically) ordered before another registered
     * string. The number component is also compared and keys with a lower number component will be ordered before.
     *
     * This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time.
     *
     * @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity
     * between a strict ordering and lexicographical comparison there is no `operator<` function for this string class.
     * While a lexicographical comparison would be O(n), this comparison is O(1).
     *
     * @param other Another registered string.
     * @returns `true` if `*this` should be ordered-before @p other; `false` otherwise.
     */
    bool owner_before(const RStringKey& other) const noexcept;

#ifndef DOXYGEN_BUILD // Sphinx warns about Duplicate C++ declaration
    /**
     * Returns the hash value as computed by `carb::hashString(this->truncate().c_str())` combined with the number
     * component.
     *
     * @note This value is computed once for a registered string and cached, so this operation is generally very fast.
     *
     * @returns The hash value as computed by `carb::hashString(this->truncate().c_str())`.
     */
    size_t getHash() const;

    /**
     * Returns the hash value as computed by `carb::hashLowercaseString(this->truncate().c_str())` combined with the
     * number component.
     *
     * @note This value is pre-computed for registered strings and cached, so this operation is always O(1).
     *
     * @returns The hash value as computed by `carb::hashLowercaseString(this->truncate().c_str())`.
     */
    size_t getUncasedHash() const noexcept;
#endif

    /**
     * Returns a string containing the registered string, and if getNumber() is not zero, the number appended.
* * Example: RStringKey(eRString::RS_carb, 1).toString() would produce "carb_1". * @returns A string containing the registered string. If getNumber() is non-zero, an underscore and the number are * appended. */ std::string toString() const; /** * Returns the number component of this key. * @returns The number component previously specified in the constructor or with setNumber() or via number(). */ int32_t getNumber() const noexcept; /** * Sets the number component of this key. * @param num The new number component. */ void setNumber(int32_t num) noexcept; /** * Direct access to the number component for manipulation or atomic operations via `atomic_ref`. * @returns A reference to the number component. */ int32_t& number() noexcept; private: // Hide these functions since they are incomplete using Base::c_str; using Base::data; using Base::length; }; /** * A case-insensitive registered string key. * * See \ref RString for high-level information about the registered string system. * * RStringUKey is formed by appending a numeric component to an "un-cased" (i.e. case-insensitive) registered string. * This numeric component can be used as a unique instance identifier alongside the registered string. Additionally, the * RStringUKey::toString() function will append a non-zero numeric component following an underscore. */ class CARB_VIZ RStringUKey final : public detail::RStringTraits<true, detail::RStringKeyBase> { using Base = detail::RStringTraits<true, detail::RStringKeyBase>; public: /** * Constant that indicates whether this is "un-cased" (i.e. case-insensitive) (will always be `true`). */ using Base::IsUncased; /** * Default constructor. isEmpty() will report `true` and getNumber() will return `0`. */ constexpr RStringUKey() noexcept; /** * Initializes this registered string to one of the static pre-defined registered strings. * @param staticString The pre-defined registered string to use. * @param number The number that will be returned by getNumber(). 
*/ constexpr RStringUKey(eRString staticString, int32_t number = 0) noexcept; /** * Finds or registers a new case-insensitive string. * * @note The casing of the string actually used may be different than @p str when reported by c_str() or toString(). * * @param str The string to find or register. * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been * previously registered, `*this` is initialized as if with the default constructor. */ RStringUKey(const char* str, RStringOp op = RStringOp::eRegister); /** * Finds or registers a new string with a given number component. * @param number The number that will be returned by getNumber(). * @param str The string to find or register. * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been * previously registered, `*this` is initialized as if with the default constructor. */ RStringUKey(int32_t number, const char* str, RStringOp op = RStringOp::eRegister); /** * Finds or registers a new counted case-insensitive string. * @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0') * characters. * @note The casing of the string actually used may be different than @p str when reported by c_str() or toString(). * @param str The string to find or register. * @param len The number of characters of @p str to include. * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been * previously registered, `*this` is initialized as if with the default constructor. */ explicit RStringUKey(const char* str, size_t len, RStringOp op = RStringOp::eRegister); /** * Finds or registers a new counted case-insensitive string with a given number component. * @note While generally not recommended, passing @p len allows the given string to contain embedded NUL ('\0') * characters. 
* @note The casing of the string actually used may be different than @p str when reported by c_str() or toString(). * @param number The number that will be returned by getNumber(). * @param str The string to find or register. * @param len The number of characters of @p str to include. * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been * previously registered, `*this` is initialized as if with the default constructor. */ explicit RStringUKey(int32_t number, const char* str, size_t len, RStringOp op = RStringOp::eRegister); /** * Finds or registers a new case-insensitive `std::string`. * @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as * well. * @note The casing of the string actually used may be different than @p str when reported by c_str() or toString(). * @param str The `std::string` to find or register. * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been * previously registered, `*this` is initialized as if with the default constructor. */ explicit RStringUKey(const std::string& str, RStringOp op = RStringOp::eRegister); /** * Finds or registers a new case-insensitive `std::string` with a number component. * @note If @p str contains embedded NUL ('\0') characters, the RString will contain the embedded NUL characters as * well. * @note The casing of the string actually used may be different than @p str when reported by c_str() or toString(). * @param number The number that will be returned by getNumber(). * @param str The `std::string` to find or register. * @param op The operation to perform. If directed to RStringOp::eFindExisting and the string has not been * previously registered, `*this` is initialized as if with the default constructor. 
*/ explicit RStringUKey(int32_t number, const std::string& str, RStringOp op = RStringOp::eRegister); /** * Appends a number component to a registered string to form a key. * @param str The registered string to decorate. * @param number The number that will be returned by getNumber(). */ RStringUKey(const RStringU& str, int32_t number = 0); /** * Converts a registered string key into an "un-cased" (i.e. case-insensitive) registered string key. * @param other The RStringKey to convert. The number component is maintained. */ explicit RStringUKey(const RStringKey& other); /** * Returns a copy of this registered string key. * @note This function exists for compatibility with the RStringKey interface. * @returns `*this` since this string is already "un-cased" (i.e. case-insensitive). The number component will be * the same as the number for `*this`. */ RStringUKey toUncased() const noexcept; /** * Returns a registered string without the number component. * @returns A registered string that matches `*this` without a number component. */ RStringU truncate() const noexcept; /** * Equality comparison between this registered string key and another. * * @note A case-insensitive compare is performed. * * @param other Another registered string. * @returns `true` if `*this` and `other` represent the same registered string and have matching number components; * `false` otherwise. */ bool operator==(const RStringUKey& other) const noexcept; /** * Inequality comparison between this registered string key and another. * * @note A case-insensitive compare is performed. * * @param other Another registered string. * @returns `false` if `*this` and `other` represent the same registered string and have matching number components; * `true` otherwise. */ bool operator!=(const RStringUKey& other) const noexcept; /** * Checks whether this registered string key is stably (but not lexicographically) ordered before another registered * string. 
The number component is also compared and keys with a lower number component will be ordered before. * * This ordering is to make registered strings usable as keys in ordered associative containers in O(1) time. * * @note This is NOT a lexicographical comparison; for that use one of the compare() functions. To reduce ambiguity * between a strict ordering and lexicographical comparison there is no `operator<` function for this string class. * While a lexicographical comparison would be O(n), this comparison is O(1). * * @param other Another registered string. * @returns `true` if `*this` should be ordered-before @p other; `false` otherwise. */ bool owner_before(const RStringUKey& other) const noexcept; #ifndef DOXYGEN_BUILD // Sphinx warns about Duplicate C++ declaration /** * Returns the hash value as by `carb::hashString(this->truncate().c_str())` combined with the number component. * * @note This value is computed once for a registered string and cached, so this operation is generally very fast. * * @returns The hash value as computed by `carb::hashString(this->truncate().c_str())`. */ size_t getHash() const; /** * Returns the hash value as by `carb::hashLowercaseString(this->truncate().c_str())` combined with the number * component. * * @note This value is pre-computed for registered strings and cached, so this operation is always O(1). * * @returns The hash value as computed by `carb::hashLowercaseString(this->truncate().c_str())`. */ size_t getUncasedHash() const noexcept; #endif /** * Returns a string containing the registered string, and if getNumber() is not zero, the number appended. * * Example: RStringUKey(eRString::RS_carb, 1).toString() would produce "carb_1". * @returns A string containing the registered string. If getNumber() is non-zero, an underscore and the number are * appended. */ std::string toString() const; /** * Returns the number component of this key. 
* @returns The number component previously specified in the constructor or with setNumber() or via number(). */ int32_t getNumber() const noexcept; /** * Sets the number component of this key. * @param num The new number component. */ void setNumber(int32_t num) noexcept; /** * Direct access to the number component for manipulation or atomic operations via `atomic_ref`. * @returns A reference to the number component. */ int32_t& number() noexcept; private: // Hide these functions since they are incomplete using Base::c_str; using Base::data; using Base::length; }; // Can use ADL specialization for global operator<< for stream output /** * Global stream output operator for RString. * @param o The output stream to write to. * @param s The registered string to output. * @returns The output stream, @p o. */ template <class CharT, class Traits> ::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RString& s) { o << s.c_str(); return o; } /** * Global stream output operator for RStringU. * @param o The output stream to write to. * @param s The registered string to output. * @returns The output stream, @p o. */ template <class CharT, class Traits> ::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RStringU& s) { o << s.c_str(); return o; } /** * Global stream output operator for RStringKey. * @param o The output stream to write to. * @param s The registered string to output. * @returns The output stream, @p o. */ template <class CharT, class Traits> ::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RStringKey& s) { o << s.toString(); return o; } /** * Global stream output operator for RStringUKey. * @param o The output stream to write to. * @param s The registered string to output. * @returns The output stream, @p o. 
 */
template <class CharT, class Traits>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const RStringUKey& s)
{
    // Keys stream via toString() (not c_str()) so a non-zero number component is included.
    o << s.toString();
    return o;
}

} // namespace carb

// Specializations for std::hash and std::owner_less per type
// (these make the registered string types directly usable as keys in unordered and ordered containers)

/**
 * RString specialization for `std::hash`.
 */
template <>
struct std::hash<::carb::RString>
{
    /**
     * Returns the hash
     * @param v The registered string.
     * @returns The hash as via the getHash() function.
     */
    size_t operator()(const ::carb::RString& v) const
    {
        return v.getHash();
    }
};

/**
 * RString specialization for `std::owner_less`.
 */
template <>
struct std::owner_less<::carb::RString>
{
    /**
     * Returns true if @p lhs should be ordered-before @p rhs.
     * @param lhs A registered string.
     * @param rhs A registered string.
     * @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
     */
    bool operator()(const ::carb::RString& lhs, const ::carb::RString& rhs) const
    {
        return lhs.owner_before(rhs);
    }
};

/**
 * RStringU specialization for `std::hash`.
 */
template <>
struct std::hash<::carb::RStringU>
{
    /**
     * Returns the hash
     * @param v The registered string.
     * @returns The hash as via the getHash() function.
     */
    size_t operator()(const ::carb::RStringU& v) const
    {
        return v.getHash();
    }
};

/**
 * RStringU specialization for `std::owner_less`.
 */
template <>
struct std::owner_less<::carb::RStringU>
{
    /**
     * Returns true if @p lhs should be ordered-before @p rhs.
     * @param lhs A registered string.
     * @param rhs A registered string.
     * @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
     */
    bool operator()(const ::carb::RStringU& lhs, const ::carb::RStringU& rhs) const
    {
        return lhs.owner_before(rhs);
    }
};

/**
 * RStringKey specialization for `std::hash`.
 */
template <>
struct std::hash<::carb::RStringKey>
{
    /**
     * Returns the hash
     * @param v The registered string.
     * @returns The hash as via the getHash() function.
     */
    size_t operator()(const ::carb::RStringKey& v) const
    {
        return v.getHash();
    }
};

/**
 * RStringKey specialization for `std::owner_less`.
 */
template <>
struct std::owner_less<::carb::RStringKey>
{
    /**
     * Returns true if @p lhs should be ordered-before @p rhs.
     * @param lhs A registered string.
     * @param rhs A registered string.
     * @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
     */
    bool operator()(const ::carb::RStringKey& lhs, const ::carb::RStringKey& rhs) const
    {
        return lhs.owner_before(rhs);
    }
};

/**
 * RStringUKey specialization for `std::hash`.
 */
template <>
struct std::hash<::carb::RStringUKey>
{
    /**
     * Returns the hash
     * @param v The registered string.
     * @returns The hash as via the getHash() function.
     */
    size_t operator()(const ::carb::RStringUKey& v) const
    {
        return v.getHash();
    }
};

/**
 * RStringUKey specialization for `std::owner_less`.
 */
template <>
struct std::owner_less<::carb::RStringUKey>
{
    /**
     * Returns true if @p lhs should be ordered-before @p rhs.
     * @param lhs A registered string.
     * @param rhs A registered string.
     * @returns `true` if @p lhs should be ordered-before @p rhs; `false` otherwise.
     */
    bool operator()(const ::carb::RStringUKey& lhs, const ::carb::RStringUKey& rhs) const
    {
        return lhs.owner_before(rhs);
    }
};

#include "RString.inl"
omniverse-code/kit/include/carb/Memory.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief DLL Boundary safe memory management functions #pragma once #include "Defines.h" #include "Types.h" #include "cpp/Bit.h" #include "detail/DeferredLoad.h" //! Internal function used by all other allocation functions. //! //! This function is the entry point into `carb.dll`/`libcarb.so` for @ref carb::allocate(), @ref carb::deallocate(), //! and @ref carb::reallocate(). There are four modes to this function: //! - If @p p is `nullptr` and @p size is `0`, no action is taken and `nullptr` is returned. //! - If @p p is not `nullptr` and @p size is `0`, the given pointer is deallocated and `nullptr` is returned. //! - If @p p is `nullptr` and @p size is non-zero, memory of the requested @p size and alignment specified by @p align //! is allocated and returned. If an allocation error occurs, `nullptr` is returned. //! - If @p p is not `nullptr` and @p size is non-zero, the memory is reallocated and copied (as if by `std::memcpy`) to //! the new memory block, which is returned. If @p p can be resized in situ, the same @p p value is returned. If an //! error occurs, `nullptr` is returned. //! //! @note Using this function requires explicitly linking with `carb.dll`/`libcarb.so` if @ref CARB_REQUIRE_LINKED is //! `1`. Otherwise, the caller must ensure that `carb.dll`/`libcarb.so` is already loaded before calling this function. //! Use in situations where the Carbonite Framework is already loaded (i.e. plugins) does not require explicitly linking //! against Carbonite as this function will be found dynamically at runtime. 
//! //! @warning Do not call this function directly. Instead call @ref carb::allocate(), @ref carb::deallocate(), or //! @ref carb::reallocate() //! //! @see carb::allocate() carb::reallocate() carb::deallocate() //! @param p The pointer to re-allocate or free. May be `nullptr`. See explanation above. //! @param size The requested size of the memory region in bytes. See explanation above. //! @param align The requested alignment of the memory region in bytes. Must be a power of two. See explanation above. //! @returns Allocated memory, or `nullptr` upon deallocation, or `nullptr` on allocation when an error occurs. #if CARB_REQUIRE_LINKED CARB_DYNAMICLINK void* carbReallocate(void* p, size_t size, size_t align); #else CARB_DYNAMICLINK void* carbReallocate(void* p, size_t size, size_t align) CARB_ATTRIBUTE(weak); #endif namespace carb { //! \cond DEV namespace detail { CARB_DETAIL_DEFINE_DEFERRED_LOAD(getCarbReallocate, carbReallocate, (void* (*)(void*, size_t, size_t))); } // namespace detail //! \endcond //! Allocates a block of memory. //! //! @note Any plugin (or the executable) may @ref allocate the memory and a different plugin (or the executable) may //! @ref deallocate or @ref reallocate it. //! //! @note If carb.dll/libcarb.so is not loaded, this function will always return `nullptr`. //! //! @param size The size of the memory block requested, in bytes. Specifying '0' will return a valid pointer that //! can be passed to @ref deallocate but cannot be used to store any information. //! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less //! than `sizeof(size_t)` are ignored. `0` indicates to use default system alignment (typically //! `2 * sizeof(void*)`). //! @returns A non-`nullptr` memory block of @p size bytes with minimum alignment @p align. If an error occurred, //! or memory could not be allocated, `nullptr` is returned. The memory is not initialized. 
inline void* allocate(size_t size, size_t align = 0) noexcept { if (auto impl = detail::getCarbReallocate()) return impl(nullptr, size, align); else return nullptr; } //! Deallocates a block of memory previously allocated with @ref allocate(). //! //! @note Any plugin (or the executable) may @ref allocate the memory and a different plugin (or the executable) may //! @ref deallocate or @ref reallocate it. //! //! @note If carb.dll/libcarb.so is not loaded, this function will silently do nothing. Since @ref allocate would have //! returned `nullptr` in this case, this function should never be called. //! //! @param p The block of memory previously returned from @ref allocate() or @ref reallocate(), or `nullptr`. inline void deallocate(void* p) noexcept { if (p) { if (auto impl = detail::getCarbReallocate()) impl(p, 0, 0); } } //! Reallocates a block of memory previously allocated with @ref allocate(). //! //! This function changes the size of the memory block pointed to by @p p to @p size bytes with @p align alignment. //! The contents are unchanged from the start of the memory block up to the minimum of the old size and @p size. If //! @p size is larger than the old size, the added memory is not initialized. If @p p is `nullptr`, the call is //! equivalent to `allocate(size, align)`; if @p size is `0` and @p p is not `nullptr`, the call is equivalent to //! `deallocate(p)`. Unless @p p is `nullptr`, it must have been retrieved by an earlier call to @ref allocate() or //! @ref reallocate(). If the memory region was moved in order to resize it, @p p will be freed as with `deallocate(p)`. //! //! @note Any plugin (or the executable) may @ref allocate the memory and a different plugin (or the executable) may //! @ref deallocate or @ref reallocate it. //! //! @note If carb.dll/libcarb.so is not loaded, this function will always return @p p without side-effects. //! //! 
@param p The block of memory previously returned from @ref allocate() or @ref reallocate() if resizing is //! resizing is desired. If `nullptr` is passed as this parameter, the call behaves as if //! `allocate(size, align)` was called. //! @param size The size of the memory block requested, in bytes. See above for further explanation. //! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less //! than `sizeof(size_t)` are ignored. Changing the alignment from a previous allocation is undefined behavior. //! `0` indicates to use default system alignment (typically `2 * sizeof(void*)`). //! @returns A pointer to a block of memory of @p size bytes with minimum alignment @p align, unless an error //! occurs in which case `nullptr` is returned. If @p p is `nullptr` and @p size is `0` then `nullptr` is also //! returned. inline void* reallocate(void* p, size_t size, size_t align = 0) noexcept { if (auto impl = detail::getCarbReallocate()) return impl(p, size, align); else return p; } /** * A class implementing the 'Allocator' C++ Named Requirement. * * This class is usable for C++ classes that require an allocator, such as `std::vector`. * @note This class requires dynamic or static linking to carb.dll/libcarb.so/libcarb.dylib in order to function. * @tparam T The type to allocate * @tparam Align The requested alignment. Must be zero or a power of two. Zero indicates to use `T`'s required * alignment. 
 */
template <class T, size_t Align = 0>
class Allocator
{
public:
    using pointer = T*; //!< pointer
    using const_pointer = const T*; //!< const_pointer
    using reference = T&; //!< reference
    using const_reference = const T&; //!< const_reference
    using void_pointer = void*; //!< void_pointer
    using const_void_pointer = const void*; //!< const_void_pointer
    using value_type = T; //!< value_type
    using size_type = std::size_t; //!< size_type
    using difference_type = std::ptrdiff_t; //!< difference_type

    static_assert(!Align || ::carb::cpp::has_single_bit(Align), "Must be a power of two");
    constexpr static size_t alignment = Align; //!< Alignment (non-standard)

    //! A struct that allows determining an allocator for class `U` through the `other` type.
    template <class U>
    struct rebind
    {
        //! The type of `Allocator<U>`
        using other = Allocator<U, alignment>;
    };

    //! Constructor
    constexpr Allocator() noexcept = default;

    //! Copy constructor
    constexpr Allocator(const Allocator&) noexcept = default;

    //! Copy-assign operator
    constexpr Allocator& operator=(const Allocator&) noexcept = default;

    //! Copy constructor
    template <class U, size_t UAlign>
    constexpr Allocator(const Allocator<U, UAlign>& other) noexcept
    {
        CARB_UNUSED(other);
    }

    //! Copy-assign operator
    template <class U, size_t UAlign>
    constexpr Allocator& operator=(const Allocator<U, UAlign>& other) noexcept
    {
        CARB_UNUSED(other);
        return *this;
    }

    //! Destructor
    ~Allocator() = default;

    //! Equality operator
    //! (all Allocator instances are interchangeable — memory from one may be freed by another)
    constexpr bool operator==(const Allocator& other) const noexcept
    {
        CARB_UNUSED(other);
        return true;
    }

    //! Inequality operator
    constexpr bool operator!=(const Allocator& other) const noexcept
    {
        CARB_UNUSED(other);
        return false;
    }

    /**
     * Allocates suitable storage for an array object of type `T[n]` and creates the array, but does not construct
     * array elements.
     *
     * If \ref alignment is suitable (that is, not less than the required alignment of `T`) it is used, otherwise the
     * required alignment of `T` is used.
     * @param n The number of elements of `T` to allocate space for.
     * @returns A pointer to memory that can contain an array of type `T[n]`, but no array elements have been
     * constructed.
     */
    pointer allocate(size_type n = 1) noexcept /*strengthened*/
    {
        // Unary '+' forms a prvalue copy so the static constexpr member `alignment` is not ODR-used
        // (no out-of-line definition is needed pre-C++17).
        auto align = ::carb_max(+alignment, std::alignment_of<T>::value);
        return pointer(::carb::allocate(sizeof(T) * n, align));
    }

    /**
     * Same as \ref allocate(size_type) but may use \p p (`nullptr` or a pointer obtained from \ref allocate()) to aid
     * locality.
     * @note NOTE(review): the C++ 'Allocator' named requirement treats the hint as a locality hint only and still
     * requires a fresh allocation; returning @p p directly as done here will alias live memory if the hint points at
     * a block that is still in use — confirm all callers pass only pointers they own and no longer use.
     * @param n The number of elements of `T` to allocate space for.
     * @param p May be `nullptr` or a pointer obtained from \ref allocate(). If non-`nullptr`, \p p is returned.
     * @returns A pointer to memory that can contain an array of type `T[n]`, but no array elements have been
     * constructed.
     */
    pointer allocate(size_type n, const_void_pointer p) noexcept /*strengthened*/
    {
        return p ? pointer(p) : allocate(n);
    }

    /**
     * Deallocates storage pointed to by `p`, which must be a value returned by a previous call to \ref allocate() that
     * has not been invalidated by an intervening call to `deallocate`.
     * @param p A value returned by a previous call to \ref allocate() and not previously passed to `deallocate`.
     * @param n Must be the same size value that was originally passed to \ref allocate().
     */
    void deallocate(pointer p, size_type n) noexcept /*strengthened*/
    {
        CARB_UNUSED(n);
        ::carb::deallocate(p);
    }

    /**
     * Returns the largest value that can be passed to \ref allocate().
     * @returns the largest value that can be passed to \ref allocate().
     */
    size_type max_size() const noexcept
    {
        return size_type(-1);
    }

    /**
     * Constructs an object of type `X` in previously-allocated storage at the address pointed to by `p`, using `args`
     * as the constructor arguments.
     * @param p The pointer at which to construct.
     * @param args The constructor arguments.
     */
    template <class X, class... Args>
    void construct(X* const p, Args&&... args)
    {
        // Placement-new; the cast chain strips cv-qualifiers so the expression is valid for cv-qualified X.
        ::new (const_cast<void*>(static_cast<const volatile void*>(p))) X(std::forward<Args>(args)...);
    }

    /**
     * Destructs an object of type `X` pointed to by `p` but does not deallocate any storage.
     * @param p The pointer to an object of type `X` to destroy.
     */
    template <class X>
    void destroy(X* const p)
    {
        p->~X();
    }
};

/**
 * An object can inherit from this class in order to use Carbonite allocation functions for creation/deletion.
 */
template <size_t Align = 0>
class UseCarbAllocatorAligned
{
public:
    //! The alignment amount used by this allocator
    constexpr static size_t alignment = Align;

    //! \cond DEV
    void* operator new(std::size_t count)
    {
        return carb::allocate(count, alignment);
    }
    void* operator new[](std::size_t count)
    {
        return carb::allocate(count, alignment);
    }
    void operator delete(void* ptr)
    {
        carb::deallocate(ptr);
    }
    void operator delete[](void* ptr)
    {
        carb::deallocate(ptr);
    }
#if CARB_HAS_CPP17
    // C++17 aligned-new overloads: honor the larger of the class alignment and the requested alignment.
    void* operator new(std::size_t count, std::align_val_t al)
    {
        return carb::allocate(count, ::carb_max(alignment, size_t(al)));
    }
    void* operator new[](std::size_t count, std::align_val_t al)
    {
        return carb::allocate(count, ::carb_max(alignment, size_t(al)));
    }
    void operator delete(void* ptr, std::align_val_t al)
    {
        CARB_UNUSED(al);
        carb::deallocate(ptr);
    }
    void operator delete[](void* ptr, std::align_val_t al)
    {
        CARB_UNUSED(al);
        carb::deallocate(ptr);
    }
#endif
    //! \endcond
};

/** Allocated object deleter helper class. This is suitable for use in various STL container
 *  classes that accept a functor responsible for deleting an object that was allocated using
 *  an allocation system other than new/delete. This particular implementation ensures the
 *  object is destructed before deallocating its memory.
 */
template <class T>
class Deleter
{
public:
    /** Functor operator to destruct and deallocate an object that was allocated and constructed
     * using one of the carb::allocate() family of functions.
     *
     * @tparam T The data type of the object to delete.
     * @param[in] p The object to be destroyed.
     */
    void operator()(T* p) noexcept
    {
        p->~T();
        carb::deallocate(p);
    }
};

/**
 * An object can inherit from this class in order to use Carbonite allocation functions for creation/deletion.
 */
using UseCarbAllocator = UseCarbAllocatorAligned<>;

} // namespace carb
omniverse-code/kit/include/carb/ClientUtils.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! \file
//! \brief Utilities for Carbonite clients
#pragma once

#include "Framework.h"
#include "assert/AssertUtils.h"
#include "crashreporter/CrashReporterUtils.h"
#include "l10n/L10nUtils.h"
#include "logging/Log.h"
#include "logging/StandardLogger.h"
#include "profiler/Profile.h"

#include "../omni/core/Omni.h"

#include <vector>

namespace carb
{

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{

// Registers the Framework's built-in filesystem plugin on behalf of this client.
inline void registerBuiltinFileSystem(Framework* f)
{
    f->registerPlugin(g_carbClientName, f->getBuiltinFileSystemDesc());
}

// Registers the Framework's built-in logging plugin on behalf of this client.
inline void registerBuiltinLogging(Framework* f)
{
    f->registerPlugin(g_carbClientName, f->getBuiltinLoggingDesc());
}

// Registers the Framework's built-in assertion plugin on behalf of this client.
inline void registerBuiltinAssert(Framework* f)
{
    f->registerPlugin(g_carbClientName, f->getBuiltinAssertDesc());
}

// Registers the Framework's built-in thread-utilities plugin on behalf of this client.
inline void registerBuiltinThreadUtil(Framework* f)
{
    f->registerPlugin(g_carbClientName, f->getBuiltinThreadUtilDesc());
}

// On Windows static-runtime builds only, informs carb.dll about the executable's atexit()
// so teardown ordering can be coordinated; a no-op everywhere else.
inline void registerAtexitHandler()
{
# if CARB_PLATFORM_WINDOWS && !defined _DLL
    // Since we're not using the dynamic runtime, we need to notify carb.dll if the executable's atexit() functions run.
    // We only do this if this is compiled into the executable here, so check that
    auto exeHandle = GetModuleHandleW(NULL);
    HMODULE myHandle;
    if (GetModuleHandleExW(
            CARBWIN_GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | CARBWIN_GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
            (LPCWSTR)&registerAtexitHandler, &myHandle) &&
        myHandle == exeHandle)
    {
        // Verified that this function is compiled into the executable and without dynamic runtime.
        auto carbHandle = GetModuleHandleW(L"carb.dll");
        auto proc = (void (*)(void*))(carbHandle ? GetProcAddress(carbHandle, "carbControlAtexit") : nullptr);
        if (proc)
        {
            // Call our undocumented function and pass our atexit() function so that carb.dll can register a callback
            // to know when the first-chance (executable) atexit happens.
            proc((void*)&atexit);
        }
    }
# endif
}

} // namespace detail
#endif

/**
 * Main acquisition of the Carbonite Framework for Clients (applications and plugins).
 *
 * \warning It is typically not necessary to call this, since macros such as \ref OMNI_CORE_INIT already ensure that
 * this function is called properly.
 *
 * At a high level, this function:
 *
 * - Calls \ref carb::acquireFramework() and assigns it to a global variable within this module: \ref g_carbFramework.
 * - Calls \ref logging::registerLoggingForClient(), \ref assert::registerAssertForClient(), and
 *   \ref l10n::registerLocalizationForClient().
 * - Calls \ref OMNI_CORE_START().
 *
 * @param args Arguments passed to \ref OMNI_CORE_START
 * @returns A pointer to the Carbonite Framework, if initialization was successful; `nullptr` otherwise.
 */
inline Framework* acquireFrameworkAndRegisterBuiltins(const OmniCoreStartArgs* args = nullptr)
{
    // Acquire framework and set into global variable
    Framework* framework = acquireFramework(g_carbClientName);
    if (framework)
    {
        g_carbFramework = framework;

        static_assert(
            kFrameworkVersion.major == 0,
            "The framework automatically registers builtins now; the registerXXX functions can be removed once the framework version changes.");

        detail::registerAtexitHandler();

        // Starting up logging
        detail::registerBuiltinLogging(framework);
        logging::registerLoggingForClient();

        // Starting up filesystem
        detail::registerBuiltinFileSystem(framework);
        detail::registerBuiltinAssert(framework);
        detail::registerBuiltinThreadUtil(framework);

        // grab the assertion helper interface.
        assert::registerAssertForClient();
        // grab the l10n interface.
        l10n::registerLocalizationForClient();

        // start up ONI
        OMNI_CORE_START(args);
    }
    return framework;
}

/**
 * This function releases the Carbonite Framework.
 *
 * The options performed are essentially the teardown operations for \ref acquireFrameworkAndRegisterBuiltins().
 *
 * At a high-level, this function:
 * - Calls \ref logging::deregisterLoggingForClient(), \ref assert::deregisterAssertForClient(), and
 *   \ref l10n::deregisterLocalizationForClient().
 * - Calls \ref omniReleaseStructuredLog().
 * - Unloads all Carbonite plugins
 * - Calls \ref OMNI_CORE_STOP
 * - Calls \ref releaseFramework()
 * - Sets \ref g_carbFramework to `nullptr`.
 *
 * \note It is not necessary to manually call this function if \ref OMNI_CORE_INIT is used, since that macro will ensure
 * that the Framework is released.
 */
inline void releaseFrameworkAndDeregisterBuiltins()
{
    if (isFrameworkValid())
    {
        logging::deregisterLoggingForClient();
        assert::deregisterAssertForClient();
        l10n::deregisterLocalizationForClient();

        // Release structured log before unloading plugins
        omniReleaseStructuredLog();

        g_carbFramework->unloadAllPlugins();
        OMNI_CORE_STOP();
        releaseFramework();
    }
    // Cleared even if the framework was already invalid, so stale pointers are never kept.
    g_carbFramework = nullptr;
}

} // namespace carb

/**
 * Defines global variables of the framework and built-in plugins.
 *
 * \note Either this macro, or \ref CARB_GLOBALS_EX or \ref OMNI_APP_GLOBALS must be specified in the global namespace
 * in exactly one compilation unit for a Carbonite Application.
 *
 * @param clientName The name of the client application. Must be unique with respect to any plugins loaded. Also is the
 * name of the default log channel.
 */
#define CARB_GLOBALS(clientName) CARB_GLOBALS_EX(clientName, nullptr)

/**
 * Defines global variables of the framework and built-in plugins.
 *
 * \note Either this macro, or \ref CARB_GLOBALS or \ref OMNI_APP_GLOBALS must be specified in the global namespace in
 * exactly one compilation unit for a Carbonite Application.
 *
 * @param clientName The name of the client application. Must be unique with respect to any plugins loaded. Also is the
 * name of the default log channel.
 * @param clientDescription A description to use for the default log channel.
 */
#define CARB_GLOBALS_EX(clientName, clientDescription)                                                                 \
    CARB_FRAMEWORK_GLOBALS(clientName)                                                                                 \
    CARB_LOG_GLOBALS()                                                                                                 \
    CARB_PROFILER_GLOBALS()                                                                                            \
    CARB_ASSERT_GLOBALS()                                                                                              \
    CARB_LOCALIZATION_GLOBALS()                                                                                        \
    CARB_CRASH_REPORTER_GLOBALS()                                                                                      \
    OMNI_GLOBALS_ADD_DEFAULT_CHANNEL(clientName, clientDescription)
omniverse-code/kit/include/carb/Strong.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "Defines.h"
#include "cpp/TypeTraits.h"

#include <typeindex> // for std::hash
#include <ostream> // for std::basic_ostream

/**
 * Implements a strong type. `typedef` and `using` declarations do not declare a new type. `typedef int MyType` uses the
 * name `MyType` to refer to int; MyType and int are therefore interchangeable.
 *
 * CARB_STRONGTYPE(MyType, int) differs in that it creates an int-like structure named MyType which is type-safe. MyType
 * can be compared to `int` values, but cannot be implicitly assigned an int.
 */
#define CARB_STRONGTYPE(Name, T) using Name = ::carb::Strong<T, struct Name##Sig>

namespace carb
{

// clang-format off
//! A type-safe wrapper around `T`; the `Sig` tag type makes otherwise-identical
//! instantiations distinct, so values of different strong types cannot be mixed.
template<class T, class Sig>
class Strong final
{
private:
    T val; // the single wrapped value

public:
    //! The wrapped underlying type.
    using Type = T;
    //! Value-initializes the underlying value.
    constexpr Strong() : val{} {}
    //! Explicit construction from the underlying type (no implicit conversion allowed).
    constexpr explicit Strong(T&& val_) : val(std::forward<T>(val_)) {}
    constexpr Strong(const Strong& rhs) = default;
    Strong& operator=(const Strong& rhs) = default;
    constexpr Strong(Strong&& rhs) = default;
    Strong& operator=(Strong&& rhs) = default;
    //! Read access to the underlying value.
    const T& get() const { return val; }
    //! Mutable access to the underlying value.
    T& get() { return val; }
    /// Ensure that the underlying type matches expected; recommended for printf
    template <class U>
    U ensure() const { static_assert(std::is_same<T, U>::value, "Types are not the same"); return val; }
    //! Truthiness of the underlying value (explicit to avoid accidental integral use).
    explicit operator bool () const { return !!val; }
    // Equality/inequality against both the strong type and the raw underlying type;
    // ordering is only defined between strong values.
    bool operator == (const Strong& rhs) const { return val == rhs.val; }
    bool operator == (const T& rhs) const { return val == rhs; }
    bool operator != (const Strong& rhs) const { return val != rhs.val; }
    bool operator != (const T& rhs) const { return val != rhs; }
    bool operator < (const Strong& rhs) const { return val < rhs.val; }
    //! Swaps underlying values; noexcept iff swapping `T` is.
    void swap(Strong& rhs) noexcept(noexcept(std::swap(val, rhs.val))) { std::swap(val, rhs.val); }
};
// clang-format on

//! Stream-output support: forwards to the underlying value's `operator<<`.
template <class CharT, class Traits, class T, class Sig>
::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& o, const Strong<T, Sig>& s)
{
    o << s.get();
    return o;
}

// Swap can be specialized with ADL
template <class T, class Sig, typename = std::enable_if_t<carb::cpp::is_swappable<T>::value, bool>>
void swap(Strong<T, Sig>& lhs, Strong<T, Sig>& rhs) noexcept(noexcept(lhs.swap(rhs)))
{
    lhs.swap(rhs);
}

} // namespace carb

// Specialization for std::hash: hashes the underlying value, so a Strong<T, Sig>
// hashes identically to its wrapped T.
template <class T, class Sig>
struct std::hash<::carb::Strong<T, Sig>>
{
    size_t operator()(const ::carb::Strong<T, Sig>& v) const
    {
        return ::std::hash<T>{}(v.get());
    }
};
omniverse-code/kit/include/carb/RString.inl
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include "RStringInternals.inl"

namespace carb
{
namespace detail
{

// Disable warnings since the Base{} initializers might not include every field, which is non-trivial since the Base
// is a template parameter and some bases have different fields. However, the later fields will be zero-initialized.
CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wmissing-field-initializers")

//! Default constructor: string id 0 (the empty string).
template <bool Uncased, class Base>
inline constexpr RStringTraits<Uncased, Base>::RStringTraits() noexcept : Base{ 0, Uncased }
{
}

//! Construct from a pre-defined static string enumerant; id must be within the static range.
template <bool Uncased, class Base>
inline constexpr RStringTraits<Uncased, Base>::RStringTraits(eRString staticString) noexcept
    : Base{ uint32_t(staticString), Uncased }
{
    CARB_ASSERT(uint32_t(staticString) <= kMaxStaticRString);
}

//! Construct by interning a NUL-terminated string (find or add per `op`).
template <bool Uncased, class Base>
inline RStringTraits<Uncased, Base>::RStringTraits(const char* str, RStringOp op)
    : Base{ rstring::Internals::get().findOrAdd(str, Uncased, op), Uncased }
{
}

//! Construct by interning a counted string (find or add per `op`).
template <bool Uncased, class Base>
inline RStringTraits<Uncased, Base>::RStringTraits(const char* str, size_t len, RStringOp op)
    : Base{ rstring::Internals::get().findOrAdd(str, len, Uncased, op), Uncased }
{
}

//! Construct by interning a std::string (find or add per `op`).
template <bool Uncased, class Base>
inline RStringTraits<Uncased, Base>::RStringTraits(const std::string& str, RStringOp op)
    : Base{ rstring::Internals::get().findOrAdd(str.data(), str.length(), Uncased, op), Uncased }
{
}

//! Construct directly from an already-registered string id.
template <bool Uncased, class Base>
inline RStringTraits<Uncased, Base>::RStringTraits(uint32_t stringId) noexcept : Base{ stringId, Uncased }
{
    // If we're uncased, we should be referencing an authority.
    CARB_ASSERT(!Uncased || rstring::Internals::get()[stringId]->m_authority);
}

CARB_IGNOREWARNING_GNUC_POP

//! True if this string id refers to a registered entry in the intern table.
template <bool Uncased, class Base>
inline bool RStringTraits<Uncased, Base>::isValid() const noexcept
{
    return rstring::Internals::get()[this->m_stringId] != nullptr;
}

//! True only for the default-constructed (id 0) empty string.
template <bool Uncased, class Base>
inline constexpr bool RStringTraits<Uncased, Base>::isEmpty() const noexcept
{
    return this->m_stringId == 0;
}

template <bool Uncased, class Base>
inline constexpr bool RStringTraits<Uncased, Base>::isUncased() const noexcept
{
    CARB_ASSERT(this->m_uncased == Uncased);
    return Uncased;
}

template <bool Uncased, class Base>
inline constexpr uint32_t RStringTraits<Uncased, Base>::getStringId() const noexcept
{
    return this->m_stringId;
}

//! Hash of the string; uncased variants use the pre-computed case-insensitive hash.
template <bool Uncased, class Base>
inline size_t RStringTraits<Uncased, Base>::getHash() const
{
    auto& internals = rstring::Internals::get();
    CARB_ASSERT(this->m_uncased == Uncased);
    return Uncased ? internals[this->m_stringId]->m_uncasedHash : internals.getHash(this->m_stringId);
}

template <bool Uncased, class Base>
inline size_t RStringTraits<Uncased, Base>::getUncasedHash() const noexcept
{
    return rstring::Internals::get()[this->m_stringId]->m_uncasedHash;
}

// NOTE(review): the accessors below index the intern table without checking isValid();
// presumably an invalid id is a programming error — confirm against Internals::operator[].
template <bool Uncased, class Base>
inline const char* RStringTraits<Uncased, Base>::c_str() const noexcept
{
    return rstring::Internals::get()[this->m_stringId]->m_string;
}

template <bool Uncased, class Base>
inline const char* RStringTraits<Uncased, Base>::data() const noexcept
{
    return rstring::Internals::get()[this->m_stringId]->m_string;
}

template <bool Uncased, class Base>
inline size_t RStringTraits<Uncased, Base>::length() const noexcept
{
    return rstring::Internals::get()[this->m_stringId]->m_stringLen;
}

//! Copies the interned characters into an owned std::string.
template <bool Uncased, class Base>
inline std::string RStringTraits<Uncased, Base>::toString() const
{
    const carb::detail::rstring::Rec* rec = carb::detail::rstring::Internals::get()[this->m_stringId];
    return std::string(rec->m_string, rec->m_stringLen);
}

//! Equality is an O(1) id comparison — interning guarantees unique ids per string.
template <bool Uncased, class Base>
inline bool RStringTraits<Uncased, Base>::operator==(const RStringTraits<Uncased, Base>& other) const
{
    CARB_ASSERT(this->m_uncased == Uncased && other.m_uncased == Uncased);
    return other.m_stringId == this->m_stringId;
}

template <bool Uncased, class Base>
inline bool RStringTraits<Uncased, Base>::operator!=(const RStringTraits<Uncased, Base>& other) const
{
    return !(*this == other);
}

//! Registration-order (not lexicographic) ordering; suitable for ordered containers.
template <bool Uncased, class Base>
inline bool RStringTraits<Uncased, Base>::owner_before(const RStringTraits<Uncased, Base>& other) const
{
    CARB_ASSERT(this->m_uncased == Uncased && other.m_uncased == Uncased);
    return this->m_stringId < other.m_stringId;
}

//! Lexicographic comparison; falls back to case-insensitive compare if either side is uncased.
template <bool Uncased, class Base>
template <bool OtherUncased, class OtherBase>
inline int RStringTraits<Uncased, Base>::compare(const RStringTraits<OtherUncased, OtherBase>& other) const
{
    CARB_ASSERT(Uncased == this->m_uncased);
    CARB_ASSERT(OtherUncased == other.isUncased());
    return !(Uncased | OtherUncased) ? rstring::casedCompare(c_str(), length(), other.c_str(), other.length()) :
                                       rstring::uncasedCompare(c_str(), length(), other.c_str(), other.length());
}

//! Lexicographic comparison against a NUL-terminated C string.
template <bool Uncased, class Base>
inline int RStringTraits<Uncased, Base>::compare(const char* s) const
{
    CARB_ASSERT(Uncased == this->m_uncased);
    return !Uncased ?
rstring::casedCompare(c_str(), length(), s, std::strlen(s)) :
           rstring::uncasedCompare(c_str(), length(), s, std::strlen(s));
}

//! Substring comparison against a NUL-terminated C string; forwards to the counted overload.
template <bool Uncased, class Base>
inline int RStringTraits<Uncased, Base>::compare(size_t pos, size_t count, const char* s) const
{
    return compare(pos, count, s, std::strlen(s));
}

namespace
{
// Per-character comparators selected at runtime by compare() below.
inline int checkCased(char c1, char c2)
{
    return int(c1) - int(c2);
}
inline int checkUncased(char c1, char c2)
{
    // FIX: std::tolower() has undefined behavior when passed a value that is negative and
    // not EOF. On platforms where `char` is signed, any byte >= 0x80 (e.g. UTF-8 data)
    // would previously be passed through as a negative int. Convert through unsigned char
    // first, per the C/C++ standard's requirement for the <cctype> functions.
    return checkCased(char(std::tolower(static_cast<unsigned char>(c1))),
                      char(std::tolower(static_cast<unsigned char>(c2))));
}
} // namespace

//! Compares the substring [pos, pos+count) of this string with the first `len` chars of `s`.
//! Returns <0, 0 or >0; when the compared prefixes are equal, the longer range orders later.
template <bool Uncased, class Base>
inline int RStringTraits<Uncased, Base>::compare(size_t pos, size_t count, const char* s, size_t len) const
{
    const rstring::Rec* rec = rstring::Internals::get()[this->m_stringId];
    CARB_ASSERT(pos <= rec->m_stringLen);

    // Take the smallest of count, len, or remaining string after pos
    count = ::carb_min(count, rec->m_stringLen - pos);
    size_t remain = ::carb_min(count, len);

    CARB_ASSERT(this->m_uncased == Uncased);
    auto check = !Uncased ? checkCased : checkUncased;

    const char* my = rec->m_string + pos;
    for (; remain != 0; --remain, ++my, ++s)
    {
        int c = check(*my, *s);
        if (c != 0)
            return c;
    }

    // Otherwise equal, so whichever is longer is ordered later.
    return int(ptrdiff_t(count - len));
}

//! Lexicographic comparison against a std::string.
template <bool Uncased, class Base>
inline int RStringTraits<Uncased, Base>::compare(const std::string& s) const
{
    CARB_ASSERT(this->m_uncased == Uncased);
    return !Uncased ?
rstring::casedCompare(c_str(), length(), s.c_str(), s.length()) :
           rstring::uncasedCompare(c_str(), length(), s.c_str(), s.length());
}

//! Substring comparison against a std::string; forwards to the counted overload.
template <bool Uncased, class Base>
inline int RStringTraits<Uncased, Base>::compare(size_t pos, size_t count, const std::string& s) const
{
    return compare(pos, count, s.c_str(), s.length());
}

} // namespace detail

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RString functions
// Thin wrappers: each simply forwards to the RStringTraits base; equality/ordering are O(1)
// string-id comparisons thanks to interning.

inline constexpr RString::RString() noexcept
{
}

inline constexpr RString::RString(eRString staticString) noexcept : Base(staticString)
{
}

inline RString::RString(const char* str, RStringOp op) : Base(str, op)
{
}

inline RString::RString(const char* str, size_t len, RStringOp op) : Base(str, len, op)
{
}

inline RString::RString(const std::string& str, RStringOp op) : Base(str, op)
{
}

//! Conversion from a keyed string: drops the number, keeps the string id.
inline RString::RString(const RStringKey& other) noexcept : Base(other.getStringId())
{
}

inline RStringU RString::toUncased() const noexcept
{
    return RStringU(*this);
}

//! No-op for RString (already has no number component).
inline RString RString::truncate() const noexcept
{
    return *this;
}

inline RStringKey RString::toRStringKey(int32_t number) const
{
    return RStringKey(*this, number);
}

inline bool RString::operator==(const RString& other) const noexcept
{
    CARB_ASSERT(!m_uncased && !other.m_uncased);
    return m_stringId == other.m_stringId;
}

inline bool RString::operator!=(const RString& other) const noexcept
{
    return !(*this == other);
}

inline bool RString::owner_before(const RString& other) const noexcept
{
    CARB_ASSERT(!m_uncased && !other.m_uncased);
    return m_stringId < other.m_stringId;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RStringU functions

inline constexpr RStringU::RStringU() noexcept : Base()
{
}

inline constexpr RStringU::RStringU(eRString staticString) noexcept : Base(staticString)
{
}

inline RStringU::RStringU(const char* str, RStringOp op) : Base(str, op)
{
}

inline RStringU::RStringU(const char* str, size_t len, RStringOp op) : Base(str, len, op)
{
}

inline RStringU::RStringU(const std::string& str, RStringOp op) : Base(str, op)
{
}

//! Conversion from a cased string: maps the id to its uncased authority entry.
inline RStringU::RStringU(const RString& other)
    : Base(detail::rstring::Internals::get().convertUncased(other.getStringId()))
{
}

inline RStringU::RStringU(const RStringUKey& other) : Base(other.getStringId())
{
}

//! Identity: already uncased.
inline RStringU RStringU::toUncased() const noexcept
{
    return RStringU(*this);
}

//! No-op for RStringU (already has no number component).
inline RStringU RStringU::truncate() const noexcept
{
    return RStringU(*this);
}

inline RStringUKey RStringU::toRStringKey(int32_t number) const
{
    return RStringUKey(*this, number);
}

inline bool RStringU::operator==(const RStringU& other) const noexcept
{
    CARB_ASSERT(m_uncased && other.m_uncased);
    return m_stringId == other.m_stringId;
}

inline bool RStringU::operator!=(const RStringU& other) const noexcept
{
    return !(*this == other);
}

inline bool RStringU::owner_before(const RStringU& other) const noexcept
{
    CARB_ASSERT(m_uncased && other.m_uncased);
    return m_stringId < other.m_stringId;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RStringKey functions
// A keyed string is a registered string plus an int32 number; equality, ordering, hashing and
// toString() all incorporate the number.

inline constexpr RStringKey::RStringKey() noexcept : Base()
{
    CARB_ASSERT(this->m_number == 0);
}

inline constexpr RStringKey::RStringKey(eRString staticString, int32_t number) noexcept : Base(staticString)
{
    this->m_number = number;
}

inline RStringKey::RStringKey(const char* str, RStringOp op) : Base(str, op)
{
    CARB_ASSERT(this->m_number == 0);
}

inline RStringKey::RStringKey(int32_t number, const char* str, RStringOp op) : Base(str, op)
{
    this->m_number = number;
}

inline RStringKey::RStringKey(const char* str, size_t len, RStringOp op) : Base(str, len, op)
{
    CARB_ASSERT(this->m_number == 0);
}

inline RStringKey::RStringKey(int32_t number, const char* str, size_t len, RStringOp op) : Base(str, len, op)
{
    this->m_number = number;
}

inline RStringKey::RStringKey(const std::string& str, RStringOp op) : Base(str, op)
{
    CARB_ASSERT(this->m_number == 0);
}

inline RStringKey::RStringKey(int32_t number, const std::string& str, RStringOp op) : Base(str, op)
{
    this->m_number = number;
}

inline RStringKey::RStringKey(const RString& rstr, int32_t number) : Base(rstr.getStringId())
{
    this->m_number = number;
}

inline RStringUKey RStringKey::toUncased() const noexcept
{
    return RStringUKey(*this);
}

//! Drops the number component, yielding the plain registered string.
inline RString RStringKey::truncate() const noexcept
{
    return RString(*this);
}

inline bool RStringKey::operator==(const RStringKey& other) const noexcept
{
    CARB_ASSERT(!m_uncased && !other.m_uncased);
    return m_stringId == other.m_stringId && m_number == other.m_number;
}

inline bool RStringKey::operator!=(const RStringKey& other) const noexcept
{
    return !(*this == other);
}

//! Orders by string id first, then by number.
inline bool RStringKey::owner_before(const RStringKey& other) const noexcept
{
    CARB_ASSERT(!m_uncased && !other.m_uncased);
    if (m_stringId != other.m_stringId)
        return m_stringId < other.m_stringId;
    return m_number < other.m_number;
}

//! Combines the base string hash with the number (number 0 leaves the hash unchanged).
inline size_t RStringKey::getHash() const
{
    auto hash = Base::getHash();
    return getNumber() ? carb::hashCombine(hash, getNumber()) : hash;
}

inline size_t RStringKey::getUncasedHash() const noexcept
{
    auto hash = Base::getUncasedHash();
    return getNumber() ? carb::hashCombine(hash, getNumber()) : hash;
}

//! Renders as "<string>" when number is 0, otherwise "<string>_<number>".
inline std::string RStringKey::toString() const
{
    std::string str = Base::toString();
    if (m_number == 0)
        return str;
    str += '_';
    str += std::to_string(m_number);
    return str;
}

inline int32_t RStringKey::getNumber() const noexcept
{
    return m_number;
}

inline void RStringKey::setNumber(int32_t num) noexcept
{
    m_number = num;
}

inline int32_t& RStringKey::number() noexcept
{
    return m_number;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RStringUKey functions
// Uncased variant of RStringKey; mirrors its behavior exactly.

inline constexpr RStringUKey::RStringUKey() noexcept : Base()
{
    CARB_ASSERT(this->m_number == 0);
}

inline constexpr RStringUKey::RStringUKey(eRString staticString, int32_t number) noexcept : Base(staticString)
{
    this->m_number = number;
}

inline RStringUKey::RStringUKey(const char* str, RStringOp op) : Base(str, op)
{
    CARB_ASSERT(this->m_number == 0);
}

inline RStringUKey::RStringUKey(int32_t number, const char* str, RStringOp op) : Base(str, op)
{
    this->m_number = number;
}

inline RStringUKey::RStringUKey(const char* str, size_t len, RStringOp op) : Base(str, len, op)
{
    CARB_ASSERT(this->m_number == 0);
}

inline RStringUKey::RStringUKey(int32_t number, const char* str, size_t len, RStringOp op) : Base(str, len, op)
{
    this->m_number = number;
}

inline RStringUKey::RStringUKey(const std::string& str, RStringOp op) : Base(str, op)
{
    CARB_ASSERT(this->m_number == 0);
}

inline RStringUKey::RStringUKey(int32_t number, const std::string& str, RStringOp op) : Base(str, op)
{
    this->m_number = number;
}

inline RStringUKey::RStringUKey(const RStringU& rstr, int32_t number) : Base(rstr.getStringId())
{
    this->m_number = number;
}

//! Conversion from a cased key: maps the id to its uncased authority entry, keeps the number.
inline RStringUKey::RStringUKey(const RStringKey& other)
    : Base(detail::rstring::Internals::get().convertUncased(other.getStringId()))
{
    this->m_number = other.getNumber();
}

inline RStringUKey RStringUKey::toUncased() const noexcept
{
    return RStringUKey(*this);
}

//! Drops the number component, yielding the plain uncased registered string.
inline RStringU RStringUKey::truncate() const noexcept
{
    return RStringU(*this);
}

inline bool RStringUKey::operator==(const RStringUKey& other) const noexcept
{
    CARB_ASSERT(m_uncased && other.m_uncased);
    return m_stringId == other.m_stringId && m_number == other.m_number;
}

inline bool RStringUKey::operator!=(const RStringUKey& other) const noexcept
{
    return !(*this == other);
}

//! Orders by string id first, then by number.
inline bool RStringUKey::owner_before(const RStringUKey& other) const noexcept
{
    CARB_ASSERT(m_uncased && other.m_uncased);
    if (m_stringId != other.m_stringId)
        return m_stringId < other.m_stringId;
    return m_number < other.m_number;
}

inline size_t RStringUKey::getHash() const
{
    auto hash = Base::getHash();
    return getNumber() ? carb::hashCombine(hash, getNumber()) : hash;
}

inline size_t RStringUKey::getUncasedHash() const noexcept
{
    auto hash = Base::getUncasedHash();
    return getNumber() ? carb::hashCombine(hash, getNumber()) : hash;
}

//! Renders as "<string>" when number is 0, otherwise "<string>_<number>".
inline std::string RStringUKey::toString() const
{
    std::string str = Base::toString();
    if (m_number == 0)
        return str;
    str += '_';
    str += std::to_string(m_number);
    return str;
}

inline int32_t RStringUKey::getNumber() const noexcept
{
    return m_number;
}

inline void RStringUKey::setNumber(int32_t num) noexcept
{
    m_number = num;
}

inline int32_t& RStringUKey::number() noexcept
{
    return m_number;
}

} // namespace carb
omniverse-code/kit/include/carb/ObjectUtils.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Helper utilities for Carbonite objects (carb::IObject).
#pragma once

#include "IObject.h"

#include <atomic>

namespace carb
{

/**
 * Default handler for carb::IObject reaching zero references, which calls `delete`. Can be specialized for specific
 * types.
 * @param ptr The object to delete.
 */
template <class T>
void deleteHandler(T* ptr)
{
    delete ptr;
}

} // namespace carb

/**
 * Helper macro to implement default behavior of carb::IObject interface functions IObject::addRef() and
 * IObject::release().
 *
 * Example usage:
 * @code
 * class Foo : public IObject
 * {
 *     CARB_IOBJECT_IMPL
 *
 * public:
 *     ...
 * };
 * @endcode
 */
// Implementation notes (kept outside the macro — comments inside a macro body interact
// badly with line splicing):
// - The count starts at 1: constructing the object is the first reference.
// - addRef() uses memory_order_relaxed: incrementing needs no synchronization by itself.
// - release() uses the standard release/acquire refcount idiom: the decrement is a release
//   so all prior writes happen-before the destruction, and the thread that observes the
//   count hit zero issues an acquire fence before calling carb::deleteHandler(this).
#define CARB_IOBJECT_IMPL                                                                                              \
public:                                                                                                                \
    /**                                                                                                                \
     * Atomically adds one to the reference count.                                                                     \
     * @returns The current reference count after one was added, though this value may change before read if other     \
     * threads are also modifying the reference count. The return value is guaranteed to be non-zero.                  \
     */                                                                                                                \
    size_t addRef() override                                                                                           \
    {                                                                                                                  \
        size_t prev = m_refCount.fetch_add(1, std::memory_order_relaxed);                                              \
        CARB_ASSERT(prev != 0); /* resurrected item if this occurs */                                                  \
        return prev + 1;                                                                                               \
    }                                                                                                                  \
                                                                                                                       \
    /**                                                                                                                \
     * Atomically subtracts one from the reference count. If the result is zero, carb::deleteHandler() is called for   \
     * `this`.                                                                                                         \
     * @returns The current reference count after one was subtracted. If zero is returned, carb::deleteHandler() was   \
     * called for `this`.                                                                                              \
     */                                                                                                                \
    size_t release() override                                                                                          \
    {                                                                                                                  \
        size_t prev = m_refCount.fetch_sub(1, std::memory_order_release);                                              \
        CARB_ASSERT(prev != 0); /* double release if this occurs */                                                    \
        if (prev == 1)                                                                                                 \
        {                                                                                                              \
            std::atomic_thread_fence(std::memory_order_acquire);                                                       \
            carb::deleteHandler(this);                                                                                 \
        }                                                                                                              \
        return prev - 1;                                                                                               \
    }                                                                                                                  \
                                                                                                                       \
private:                                                                                                               \
    std::atomic_size_t m_refCount{ 1 };
omniverse-code/kit/include/carb/Framework.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Core header for registering and acquiring interfaces. #pragma once #include "Defines.h" #include "Memory.h" #include "Types.h" #include <cstddef> #include <cstdint> // free() can be #define'd which can interfere below, so handle that here #ifdef free # define CARB_FREE_UNDEFINED # pragma push_macro("free") # undef free #endif namespace carb { //! Defines the current major version of the Carbonite framework. //! //! Incrementing this variable causes great chaos as it represents a breaking change to users. Increment only with //! great thought. #define CARBONITE_MAJOR 0 //! Defines the current minor version of the Carbonite framework. //! //! This value is increment when non-breaking changes are made to the framework. #define CARBONITE_MINOR 6 //! Defines the current version of the Carbonite framework. constexpr struct Version kFrameworkVersion = { CARBONITE_MAJOR, CARBONITE_MINOR }; //! Four character code used to identify a @ref PluginRegistrationDesc object that is likely to //! have further data provided in it. constexpr FourCC kCarb_FourCC = CARB_MAKE_FOURCC('C', 'A', 'R', 'B'); //! Describes the different functions a plugin can define for use by carb::Framework. //! //! Populate this struct and register a plugin with carb::Framework::registerPlugin() for static plugins. //! //! Dynamic plugins are registered via @ref CARB_PLUGIN_IMPL. struct PluginRegistrationDesc { //! This or @ref onPluginRegisterExFn required. Preferred over @ref onPluginRegisterExFn. 
OnPluginRegisterFn onPluginRegisterFn; OnPluginStartupFn onPluginStartupFn; //!< Can be `nullptr`. OnPluginShutdownFn onPluginShutdownFn; //!< Can be `nullptr`. GetPluginDepsFn getPluginDepsFn; //!< Can be `nullptr`. OnReloadDependencyFn onReloadDependencyFn; //!< Can be `nullptr`. OnPluginPreStartupFn onPluginPreStartupFn; //!< Can be `nullptr`. OnPluginPostShutdownFn onPluginPostShutdownFn; //!< Can be `nullptr`. OnPluginRegisterExFn onPluginRegisterExFn; //!< Can be `nullptr`. OnPluginStartupExFn onPluginStartupExFn = nullptr; //!< Can be `nullptr`. Preferred over @ref onPluginStartupFn. OnPluginRegisterEx2Fn onPluginRegisterEx2Fn = nullptr; //!< Can be `nullptr`. Preferred over onPluginRegisterFn and //!< onPluginRegisterExFn. //! These members exists as a version of PluginRegistrationDesc without changing the framework version to simplify //! adoption. Static plugins that use Framework::registerPlugin() but were compiled with an earlier version of this //! struct that did not have these members will not produce the required bit pattern, //! thereby instructing the Framework that the subsequent members are not valid and cannot be read. FourCC const checkValue{ kCarb_FourCC }; //! The size of this object in bytes. This is only valid if the @ref checkValue member is set //! to @ref kCarb_FourCC. If it is not, this member and other following members will not be //! accessed in order to avoid undefined behavior. size_t const sizeofThis{ sizeof(PluginRegistrationDesc) }; OnPluginQuickShutdownFn onPluginQuickShutdownFn = nullptr; //!< Can be `nullptr`. Function that will be called for //!< the plugin if //!< \ref carb::quickReleaseFrameworkAndTerminate() is //!< invoked. //! Specifies the framework version required by this plugin. Version frameworkVersion{ kFrameworkVersion }; }; //! Describes parameters for finding plugins on disk. Multiple search paths, matching wildcards, and exclusion wildcards //! can be specified. Used primarily by @ref Framework::loadPlugins. 
//!
//! Call @ref PluginLoadingDesc::getDefault() to instantiate this object, as it will correctly set defaults.
struct PluginLoadingDesc
{
    //! List of folders in which to search for plugins.
    //!
    //! This may contain relative or absolute paths. All relative paths will be resolved relative to @ref
    //! carb::filesystem::IFileSystem::getAppDirectoryPath(), not the current working directory. Absolute paths in the
    //! list will be searched directly. If search paths configuration is invalid (e.g. search paths count is zero), the
    //! fallback values are taken from the default plugin desc.
    //!
    //! Defaults to the directory containing the process's executable.
    const char* const* searchPaths;

    size_t searchPathCount; //!< Number of entries in @ref searchPaths. Defaults to 1.

    bool searchRecursive; //!< Is search recursive in search folders. Defaults to `false`.

    //! List of filename wildcards to select loaded files. `*` and `?` can be used, e.g. "carb.*.pl?gin"
    //!
    //! Defaults to "*.plugin". This can lead to unnecessary plugins being loaded.
    const char* const* loadedFileWildcards;

    size_t loadedFileWildcardCount; //!< Number of entries in @ref loadedFileWildcards. Defaults to 1.

    //! List of filename wildcards to mark loaded files as reloadable. Framework will treat them specially to allow
    //! overwriting source plugins and will monitor them for changes.
    //!
    //! Defaults to `nullptr`.
    const char* const* reloadableFileWildcards;

    size_t reloadableFileWildcardCount; //!< Number of entries in @ref reloadableFileWildcards. Defaults to 0.

    //! If `true`, load and store the plugins interface information, then immediately unload the plugin until needed.
    //! When one of plugin's interfaces is acquired, the library will be loaded again.
    //!
    //! Defaults to `false`.
    bool unloadPlugins;

    //! List of filename wildcards to select excluded files. `*` and `?` can be used.
    //!
    //! Defaults to `nullptr`.
    const char* const* excludedFileWildcards;

    size_t excludedFileWildcardCount; //!< Number of entries in @ref excludedFileWildcards. Defaults to 0.

    //! Returns a PluginLoadingDesc with sensible defaults.
    static PluginLoadingDesc getDefault()
    {
        static constexpr const char* defaultSearchPath = "";
        static constexpr const char* defaultLoadedFileWildcard = "*.plugin";
        return { &defaultSearchPath, 1, false, &defaultLoadedFileWildcard, 1, nullptr, 0, false, nullptr, 0 };
    }
};

//! Flags for use with \ref carb::AcquireInterfaceOptions
enum AcquireInterfaceFlags : uint64_t
{
    //! Default search type, a plugin name may be specified in `typeParam`.
    eAIFDefaultType = 0,

    //! Acquire interface from interface specified in `typeParam`.
    eAIFFromInterfaceType,

    //! Acquire interface from library specified in `typeParam`.
    eAIFFromLibraryType,

    //! New types can be added here

    //! Count of types.
    eAIFNumTypes,

    //! A mask that contains all of the above types.
    fAIFTypeMask = 0xf,

    //! The interface acquire is optional and may fail without error logging.
    fAIFOptional = (1 << 4),

    //! The interface acquire will only succeed if the plugin is already initialized.
    fAIFNoInitialize = (1 << 5),
};
static_assert(eAIFNumTypes <= fAIFTypeMask, "Too many types for mask");

//! A structure used with \ref Framework::internalAcquireInterface(). Typically callers should use one of the adapter
//! functions such as \ref Framework::tryAcquireInterface() and not use this directly.
struct AcquireInterfaceOptions
{
    //! Size of this structure for versioning.
    size_t sizeofThis;

    //! The client requesting this interface
    const char* clientName;

    //! The interface requested
    InterfaceDesc desc;

    //! Type and flags. One Type must be specified as well as any flags.
    AcquireInterfaceFlags flags;

    //! Context interpreted based on the type specified in the `flags` member.
    const void* typeParam;
};
CARB_ASSERT_INTEROP_SAFE(AcquireInterfaceOptions);

//! Result of loading a plugin. Used by @ref carb::Framework::loadPlugin. 
Non-negative values indicate success.
enum class LoadPluginResult : int32_t
{
    //! Plugin was attempted to be loaded from a temporary path in use by the framework.
    eForbiddenPath = -3,

    //! Invalid argument passed to @ref Framework::loadPlugin.
    eInvalidArg = -2,

    //! An unspecified error occurred. The plugin was not loaded.
    eFailed = -1,

    //! The plugin was successfully loaded.
    eSucceeded = 0,

    //! The plugin was loaded as an ONI plugin.
    eSucceededAsOmniverseNativeInterface = 1,

    //! The plugin is already loaded.
    eAlreadyLoaded = 2,
};

//! Release Hook function
//!
//! Called when the @ref carb::Framework (or an interface) is being released, before the actual release is done. Add a
//! release hook with @ref carb::Framework::addReleaseHook(). Registered release hooks can be removed with @ref
//! carb::Framework::removeReleaseHook().
//!
//! @param iface The interface that is being released. If the framework is being released, this is `nullptr`.
//!
//! @param userData The data passed to @ref carb::Framework::addReleaseHook().
using ReleaseHookFn = void (*)(void* iface, void* userData);

//! Load Hook function
//!
//! Called when a plugin is loaded for the first time and the requested interface becomes available. The interface must
//! be acquired with \ref Framework::tryAcquireInterface() or \ref Framework::acquireInterface() etc.
//!
//! The thread that first acquires the interface will call all load hooks for that interface before the interface value
//! is returned from the Framework. All other threads that acquire that interface will wait until load hooks have been
//! called. Calling load hooks is done without an internal Framework mutex locked, so other threads are able to acquire
//! other interfaces while load hooks are executing.
//!
//! It is safe for a `LoadHookFn` to call \ref Framework::removeLoadHook() for the handle that caused it to be called,
//! or any other handle.
//!
//! @see Framework::addLoadHook() Framework::removeLoadHook()
//!
//! 
@param plugin The \ref PluginDesc for the plugin that has now loaded.
//! @param userData The `void*` that was passed to \ref Framework::addLoadHook().
using LoadHookFn = void (*)(const PluginDesc& plugin, void* userData);

//! Acquire the Carbonite framework for an application.
//!
//! Do not call this method directly. Rather, call a helper function such as @ref OMNI_CORE_INIT, @ref
//! carb::acquireFrameworkAndRegisterBuiltins or @ref carb::acquireFrameworkForBindings. Of the methods above, @ref
//! OMNI_CORE_INIT is preferred for most applications.
//!
//! The Carbonite framework is a singleton object; it will be created on the first acquire call. Subsequent calls to
//! acquire return the same instance.
//!
//! This function is expected to be used by applications, which link with the framework.
//!
//! Plugins should not use this function. Rather, plugins should use @ref carb::getFramework().
//!
//! @thread_safety This function may be called from multiple threads simultaneously.
//!
//! @param appName The application name requesting the framework. Must not be `nullptr`.
//!
//! @param frameworkVersion specifies the minimum framework version expected by the application. `nullptr` is returned
//! if the minimum version cannot be met.
//!
//! @return The Carbonite framework. Can be `nullptr`.
//!
//! @see @ref carb::releaseFramework().
CARB_DYNAMICLINK carb::Framework* acquireFramework(const char* appName, Version frameworkVersion = kFrameworkVersion);

//! Returns `true` if the Carbonite framework has been created and is still alive. Creation happens at the first @ref
//! carb::acquireFramework() call and ends at any @ref carb::releaseFramework() call.
CARB_DYNAMICLINK bool isFrameworkValid();

//! Retrieves the Carbonite SDK version string.
//!
//! @returns A string describing the current Carbonite SDK version. This will be the same value
//!          as the @ref CARB_SDK_VERSION value that was set when the SDK was built.
//!
//! 
@note This version is intended for use in host apps that link directly to the `carb` library.
//!       Libraries that don't link directly to it such as plugins will not be able to call
//!       into this without first dynamically importing it. Plugins should instead call this
//!       through `carb::getFramework()->getSdkVersion()`.
CARB_DYNAMICLINK const char* carbGetSdkVersion();

//! Tests whether the Carbonite SDK headers match the version used to build the framework.
//!
//! @param[in] version The version string to compare to the version stored in the Carbonite
//!                    framework library. This is expected to be the value of the
//!                    @ref CARB_SDK_VERSION symbol found in `carb/SdkVersion.h`.
//! @returns `true` if the version of the headers matches the version of the framework library
//!          that is currently loaded. Returns `false` if the version string in the headers
//!          does not match the version of the framework library. If the library does not
//!          match the headers, it is not necessarily a fatal problem. It does however
//!          indicate that issues may occur and that there may have been a building or
//!          packaging problem for the host app.
#define CARB_IS_SAME_SDK_VERSION(version) (strcmp(version, carbGetSdkVersion()) == 0)

//! Releases the Carbonite framework immediately.
//!
//! In some cases, more than one client can acquire the framework (e.g. scripting bindings), but only one of the clients
//! should be responsible for releasing it.
//!
//! @thread_safety May be called from any thread.
CARB_DYNAMICLINK void releaseFramework();

//! Releases the Carbonite framework immediately and exits the process, without running C/C++ atexit() registered
//! functions or static destructors.
//!
//! @note This function does not return.
//!
//! @warning This function must not be called from within a DLL, shared object, or plugin.
//!
//! This function performs the following sequence:
//! 1. Calls any exported \ref carbOnPluginQuickShutdown on all loaded plugins, if the framework is acquired. 
No plugins
//!    are unloaded, unregistered, nor have their interfaces destroyed.
//! 2. Calls any registered Framework release hooks (see \ref carb::Framework::addReleaseHook) in reverse order of
//!    registration, if the framework is acquired.
//! 3. Flushes stdout/stderr.
//! 4. Calls `TerminateProcess()` on Windows or `_exit()` on Linux and macOS.
//!
//! @thread_safety May be called from any thread.
//! @param exitCode The exit code that the process will exit with.
CARB_DYNAMICLINK void quickReleaseFrameworkAndTerminate [[noreturn]] (int exitCode);

#if CARB_PLATFORM_WINDOWS
//! Signal handler for SIGABRT for use with plugins that are statically linked to the CRT.
//!
//! @param[in] signal The signal that occurred. This will be SIGABRT.
//! @returns No return value.
//!
//! @remarks This acts as a signal handler for SIGABRT signals. This is installed during
//!          plugin initialization. This should _never_ be called directly since it will
//!          result in the process aborting immediately.
CARB_DYNAMICLINK void carbSignalHandler(int signal);
#endif

//! Defines the framework for creating Carbonite applications and plugins.
//!
//! See \carb_framework_overview for high-level documentation on core concepts, using @ref Framework, and creating
//! plugins.
//!
//! Plugins are shared libraries with a .plugin.dll/.so suffix. The plugins are named with the .plugin suffix to support
//! plugin discovery and support cohabitation with other supporting .dll/.so libraries in the same folder. It is a
//! recommended naming pattern, but not mandatory.
//!
//! Plugin library file format:
//!
//! - Windows: <plugin-name>.plugin.dll
//! - Linux: lib<plugin-name>.plugin.so
//!
//! A plugin implements one or many interfaces and has a name which uniquely identifies it to the framework. The
//! plugin's name usually matches the filename, but it is not mandatory, the actual plugin name is provided by the
//! plugin via @ref carb::OnPluginRegisterFn.
//!
//! 
"Static" plugin can also be registered with @ref Framework::registerPlugin() function, thus no shared library will //! be involved. //! //! @ref Framework comes with 3 static plugins: //! //! - @ref carb::logging::ILogging //! - @ref carb::filesystem::IFileSystem //! - @ref carb::assert::IAssert //! //! These plugins are used by @ref Framework itself. Without @ref carb::logging::ILogging, @ref Framework won't be able //! to log messages. Without @ref carb::filesystem::IFileSystem, @ref Framework won't be able to load any "dynamic" //! plugins. Without @ref carb::assert::IAssert, assertion failures will simply write a message to stderr and abort. //! //! It's up to the application to register these needed plugins. @ref OMNI_CORE_INIT() performs this registration on //! the user's behalf. //! //! The term "client" is often used across the @ref Framework API. Client is either: //! //! - A plugin. Here the client name is the same as the plugin name. //! //! - An application. The module which dynamically links with the Framework and uses @ref carb::acquireFramework(). //! //! - Scripting bindings. This is technically similar to an application, in that it dynamically links with the @ref //! Framework and uses @ref carb::acquireFramework(). //! //! Clients are uniquely identified by their name. Many functions accept client name as an argument. This allows @ref //! Framework to create a dependency tree of clients. This dependency tree allows the safe unloading of plugins. //! //! @thread_safety Unless otherwise noted, @ref Framework functions are thread-safe and may be called from multiple //! threads simultaneously. struct Framework { /** * Load and register plugins from shared libraries. */ void loadPlugins(const PluginLoadingDesc& desc = PluginLoadingDesc::getDefault()); /** * Load and register plugins from shared libraries. Prefer using @ref loadPlugins. 
*/ void(CARB_ABI* loadPluginsEx)(const PluginLoadingDesc& desc); /** * Unloads all plugins, including registered "static" plugins (see @ref Framework::registerPlugin). */ void(CARB_ABI* unloadAllPlugins)(); /** * Acquires the typed plugin interface, optionally from a specified plugin. * * If `nullptr` is passed as @p pluginName this method selects the default plugin for the given interface type. * Default plugin selection happens on the first such acquire call for a particular interface name and locked until * after this interface is released. By default the interface with highest version is selected. * * If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this * call. * * @ref Framework::setDefaultPlugin can be used to explicitly set which plugin to set as default, but it should be * called before the first acquire call. * * If acquire fails, `nullptr` is returned and an error is logged. * * @param pluginName The option to specify a plugin (implementation) that you specifically want. Pass `nullptr` to * search for all plugins. * * @return The requested plugin interface or `nullptr` if an error occurs (an error message is logged). * * @see See @ref tryAcquireInterface(const char*) for a version of this method that does not log errors. */ template <typename T> T* acquireInterface(const char* pluginName = nullptr); /** * Tries to acquire the typed plugin interface, optionally from a specified plugin. * * If `nullptr` is passed as @p pluginName this method selects the default plugin for the given interface type. * Default plugin selection happens on the first such acquire call for a particular interface name and locked until * after this interface is released. By default the interface with highest version is selected. * * If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this * call. 
* * @ref Framework::setDefaultPlugin can be used to explicitly set which plugin to set as default, but it should be * called before the first acquire call. * * @param pluginName The option to specify a plugin (implementation) that you specifically want. Pass `nullptr` to * search for all plugins. * * @return The requested plugin interface or `nullptr` if an error occurs. */ template <typename T> T* tryAcquireInterface(const char* pluginName = nullptr); /** * Acquires the typed plugin interface from the same plugin as the provided interface. * * Example: * * @code{.cpp} * Foo* foo = framework->acquireInterface<Foo>(); * * // the returned 'bar' interface is from the same plugin as 'foo'. * Bar* bar = framework->acquireInterface<Bar>(foo); * @endcode * * If foo and bar are not nullptr, they are guaranteed to be on the same plugin. * * @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a * plugin with requested interface. * * @return The typed plugin interface that is returned and will be started, or `nullptr` if the interface cannot be * acquired (an error is logged). * * @see See @ref tryAcquireInterface(const void*) for a version of this method that does not log errors. */ template <typename T> T* acquireInterface(const void* pluginInterface); /** * Tries to acquire the typed plugin interface from the same plugin as the provided interface. * * Example: * * @code{.cpp} * Foo* foo = framework->acquireInterface<Foo>(); * * // the returned 'bar' interface is from the same plugin as 'foo'. * Bar* bar = framework->tryAcquireInterface<Bar>(foo); * @endcode * * If foo and bar are not nullptr, they are guaranteed to be on the same plugin. * * @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a * plugin with requested interface. * * @return The typed plugin interface that is returned and will be started, or `nullptr` if the interface cannot be * acquired. 
*/ template <typename T> T* tryAcquireInterface(const void* pluginInterface); /** * Acquires to the typed plugin interface from the given dynamic library file. * * @note If the given library was not a registered plugin, the Framework will attempt to register the library as a * new plugin. * * If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this * call. * * @param libraryPath The library path to acquire the interface from. Can be absolute or relative (to the current * working directory) path to a dynamic (.dll/.so/.dylib) library Carbonite plugin. * * @return The typed plugin interface (guaranteed to be from the given library) or `nullptr`. If `nullptr` is * returned, an error is logged. * * @see See @ref tryAcquireInterfaceFromLibrary(const char*) for a version of this method that does not log errors. */ template <typename T> T* acquireInterfaceFromLibrary(const char* libraryPath); /** * Tries to acquire the typed plugin interface from the given dynamic library file. * * @note If the given library was not a registered plugin, the Framework will attempt to register the library as a * new plugin. * * If the plugin has not yet been started, it will be loaded and started (\ref carbOnPluginStartup called) by this * call. * * This function works exactly as @ref Framework::acquireInterfaceFromLibrary(const char*), except if acquire fails * it returns `nullptr` and doesn't log an error. * * @param libraryPath The library path to acquire the interface from. Can be absolute or relative (to the current * working directory) path to a dynamic (.dll/.so/.dylib) library Carbonite plugin. * * @return The typed plugin interface or `nullptr` if the library file was not found or an error occurred. */ template <typename T> T* tryAcquireInterfaceFromLibrary(const char* libraryPath); /** * Tries to acquire the typed plugin interface if and only if it has been previously acquired, optionally from a * specified plugin. 
* * If `nullptr` is passed as @p pluginName this method selects the default plugin for the given interface type. * Default plugin selection happens on the first such acquire call for a particular interface name and locked until * after this interface is released. By default the interface with highest version is selected. * * Unlike \ref tryAcquireInterface, this function will only acquire an interface if the plugin providing it is * already started (it won't attempt to start the plugin). This is useful during \ref carbOnPluginShutdown when a * circularly-dependent interface may have already been released by the Framework and attempting to reload it would * result in an error. * * @ref Framework::setDefaultPlugin can be used to explicitly set which plugin to set as default, but it should be * called before the first acquire call. * * @param pluginName The option to specify a plugin (implementation) that you specifically want. Pass `nullptr` to * search for all plugins. * * @return The requested plugin interface or `nullptr` if an error occurs or the plugin is not started. */ template <typename T> T* tryAcquireExistingInterface(const char* pluginName = nullptr); /** * Gets the number of plugins with the specified interface. * * @return The number of plugins with the specified interface. */ template <typename T> uint32_t getInterfacesCount(); //! Acquires all interfaces of the given type. //! //! The given output array must be preallocated. @p interfacesSize tells this method the size of the array. //! //! If @p interfaces is to small, the array is filled as much as possible and an error is logged. //! //! If @p interfaces is to big, entries past the required size will not be written. //! //! Upon output, `nullptr` may randomly appear in `interfaces`. This represents failed internal calls to @ref //! tryAcquireInterface. No error is logged in this case. //! //! @param interfaces Preallocated array that will hold the acquired interfaces. 
Values in this array must be //! preset to `nullptr` in order to determine which entries in the array are valid upon output. //! //! @param interfacesSize Number of preallocated array elements. See @ref Framework::getInterfacesCount(). //! //! @rst //! .. warning:: //! Carefully read this method's documentation, as it has a slew of design issues. It's use is not //! recommended. //! @endrst template <typename T> void acquireInterfaces(T** interfaces, uint32_t interfacesSize); //! Acquires the plugin interface pointer from an interface description. //! //! This is an internal function. Use @ref Framework::acquireInterface(const char*) instead. //! //! @rst //! .. deprecated:: 135.0 //! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead. //! However, note that this function will only be available beginning with Carbonite 135.0. //! @endrst //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin interface description //! //! @param pluginName The plugin that you specifically want. If `nullptr`, the interface's "default" plugin is //! used. //! //! @return The returned function pointer for the interface being queried and started. If `nullptr` is returned, an //! error is logged. //! //! @see See @ref tryAcquireInterfaceWithClient for a version of this method that does not log errors. void*(CARB_ABI* acquireInterfaceWithClient)(const char* clientName, InterfaceDesc desc, const char* pluginName); static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version"); //! Tries to acquires the plugin interface pointer from an interface description. //! //! This method has the same contract as @ref Framework::acquireInterfaceWithClient except an error is not logged if //! the interface could not be acquired. //! //! This is an internal function. 
Use @ref Framework::tryAcquireInterface(const char*) instead. //! //! @rst //! .. deprecated:: 135.0 //! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead. //! However, note that this function will only be available beginning with Carbonite 135.0. //! @endrst //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin interface description //! //! @param pluginName The plugin that you specifically want. If `nullptr`, the interface's "default" plugin is //! used. //! //! @return The returned function pointer for the interface being queried and started, or `nullptr` if an error //! occurs. void*(CARB_ABI* tryAcquireInterfaceWithClient)(const char* clientName, InterfaceDesc desc, const char* pluginName); static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version"); //! Acquires the typed plugin interface from the same plugin as the provided interface. //! //! This is an internal function. Use @ref Framework::acquireInterface(const char*) instead. //! //! @rst //! .. deprecated:: 135.0 //! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead. //! However, note that this function will only be available beginning with Carbonite 135.0. //! @endrst //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin interface description. //! //! @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a plugin //! with requested interface. //! //! @return The returned function pointer for the interface being queried and started. If `nullptr` is returned, an //! error is logged. //! //! @see See @ref tryAcquireInterfaceFromInterfaceWithClient for a version of this method that does not log errors. 
void*(CARB_ABI* acquireInterfaceFromInterfaceWithClient)(const char* clientName, InterfaceDesc desc, const void* pluginInterface); static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version"); //! Tries to acquires the typed plugin interface from the same plugin as the provided interface. //! //! This method has the same contract as @ref Framework::acquireInterfaceFromInterfaceWithClient except an error is //! not logged if the interface could not be acquired. //! //! This is an internal function. Use @ref Framework::tryAcquireInterface(const char*) instead. //! //! @rst //! .. deprecated:: 135.0 //! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead. //! However, note that this function will only be available beginning with Carbonite 135.0. //! @endrst //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin interface description. //! //! @param pluginInterface The interface that was returned from acquireInterface. It will be used to select a plugin //! with requested interface. //! //! @return The returned function pointer for the interface being queried and started, or `nullptr` if an error //! occurs. void*(CARB_ABI* tryAcquireInterfaceFromInterfaceWithClient)(const char* clientName, InterfaceDesc desc, const void* pluginInterface); static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version"); //! Acquires the plugin interface pointer from an interface description and a filename. //! //! @note If the given library was not a registered plugin, the Framework will attempt to register the library as a //! new plugin. //! //! This is an internal function. Use @ref Framework::acquireInterfaceFromLibrary(const char*) instead. //! //! @rst //! .. deprecated:: 135.0 //! 
If explicit client functionality is needed, please use ``internalAcquireInterface`` instead. //! However, note that this function will only be available beginning with Carbonite 135.0. //! @endrst //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin interface description //! //! @param libraryPath The filename to acquire the interface from. Can be absolute or relative path to actual //! .dll/.so Carbonite plugin. Path is relative to the current working directory. Must not be `nullptr`. //! //! @return The returned function pointer for the interface being queried and started. If `nullptr` is returned, an //! error is logged. //! //! @see See @ref tryAcquireInterfaceFromLibraryWithClient for a version of this method that does not log errors. void*(CARB_ABI* acquireInterfaceFromLibraryWithClient)(const char* clientName, InterfaceDesc desc, const char* libraryPath); static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version"); //! Tries to acquire the plugin interface pointer from an interface description and a filename. //! //! This method has the same contract as @ref Framework::acquireInterfaceFromLibraryWithClient except an error is //! not logged if the interface could not be acquired. //! //! @note If the given library was not a registered plugin, the Framework will attempt to register the library as a //! new plugin. //! //! This is an internal function. Use @ref Framework::tryAcquireInterfaceFromLibrary(const char*) instead. //! //! @rst //! .. deprecated:: 135.0 //! If explicit client functionality is needed, please use ``internalAcquireInterface`` instead. //! However, note that this function will only be available beginning with Carbonite 135.0. //! @endrst //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! 
//! @param desc The plugin interface description //! //! @param libraryPath The filename to acquire the interface from. Can be absolute or relative path to actual //! .dll/.so Carbonite plugin. Path is relative to the current working directory. Must not be `nullptr`. //! //! @return The returned function pointer for the interface being queried and started, or `nullptr` on error. void*(CARB_ABI* tryAcquireInterfaceFromLibraryWithClient)(const char* clientName, InterfaceDesc desc, const char* libraryPath); static_assert(kFrameworkVersion.major == 0, "Remove above function in next Framework version"); //! Gets the number of plugins with the specified interface descriptor. //! //! @param interfaceDesc The interface descriptor to get the plugin count. //! //! @return The number of plugins with the specified interface descriptor. uint32_t(CARB_ABI* getInterfacesCountEx)(InterfaceDesc interfaceDesc); //! Acquires all interfaces of the given type. //! //! The given output array must be preallocated. @p interfacesSize tells this method the size of the array. //! //! If @p interfaces is to small, the array is filled as much as possible and an error is logged. //! //! If @p interfaces is to big, entries past the required size will not be written. //! //! Upon output, `nullptr` may randomly appear in `interfaces`. This represents failed internal calls to @ref //! tryAcquireInterface. No error is logged in this case. //! //! This is an internal function. Use @ref Framework::acquireInterfaces() instead. //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin interface description //! //! @param interfaces Preallocated array that will hold the acquired interfaces. Values in this array must be //! preset to `nullptr` in order to determine which entries in the array are valid upon output. //! //! @param interfacesSize Number of preallocated array elements. 
See @ref Framework::getInterfacesCount(). //! //! @rst //! .. warning:: //! Carefully read this method's documentation, as it has a slew of design issues. It's use is not //! recommended. //! @endrst void(CARB_ABI* acquireInterfacesWithClient)(const char* clientName, InterfaceDesc interfaceDesc, void** interfaces, uint32_t interfacesSize); //! Releases the use of an interface that is no longer needed. //! //! Correct plugin interface type is expected, compile-time check is performed. //! //! @param pluginInterface The interface that was returned from acquireInterface template <typename T> void releaseInterface(T* pluginInterface); //! \cond DEV //! Releases the use of an interface that is no longer needed. //! //! This is an internal function. Use @ref Framework::releaseInterface() instead. //! //! @param clientName The client requesting the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param pluginInterface The interface that was returned from @ref Framework::acquireInterface. void(CARB_ABI* releaseInterfaceWithClient)(const char* clientName, void* pluginInterface); //! \endcond //! Gets the plugin descriptor for a specified plugin. //! //! @param pluginName The plugin that you specifically want to get the descriptor for. Must not be `nullptr`. //! //! @return The @ref PluginDesc, it will be filled with zeros if the plugin doesn't exist. The returned memory will //! be valid as long as the plugin is loaded. const PluginDesc&(CARB_ABI* getPluginDesc)(const char* pluginName); //! Gets the plugin descriptor for an interface returned from @ref Framework::acquireInterface. //! //! @param pluginInterface The interface that was returned from acquireInterface //! //! @return The PluginDesc, it will be filled with zeros if wrong interface pointer is provided. const PluginDesc&(CARB_ABI* getInterfacePluginDesc)(void* pluginInterface); //! Gets the plugins with the specified interface descriptor. //! //! 
@param interfaceDesc The interface descriptor to get the plugins for. //! //! @param outPlugins The array to be populated with the plugins of size @ref Framework::getInterfacesCount(). //! This array must be set to all zeros before given to this function in order to be able to tell the number of //! entries written. //! //! @rst //! .. danger:: //! //! Do not use this method. The caller will be unable to correctly size ``outPlugins``. The size of the number //! of loaded plugins matching ``interfaceDesc`` may change between the call to //! :cpp:func:`carb::Framework::getInterfacesCount` and this method. //! @endrst void(CARB_ABI* getCompatiblePlugins)(InterfaceDesc interfaceDesc, PluginDesc* outPlugins); //! Gets the number of registered plugins. //! //! @return The number of registered plugins. size_t(CARB_ABI* getPluginCount)(); //! Gets all registered plugins. //! //! @param outPlugins The array to be populated with plugin descriptors of size @ref Framework::getPluginCount(). //! //! @rst //! .. danger:: //! //! Do not use this method. The caller will be unable to correctly size ``outPlugins``. The number of plugins //! may change between the call to :cpp:member:`carb::Framework::getPluginCount` and this method. //! @endrst void(CARB_ABI* getPlugins)(PluginDesc* outPlugins); //! Attempts to reload all plugins that are currently loaded. void(CARB_ABI* tryReloadPlugins)(); //! Register a "static" plugin. //! //! While typical plugins are "dynamic" and loaded from shared libraries (see @ref Framework::loadPlugins), a //! "static" plugin can be added by calling this function from an application or another plugin. The contract is //! exactly the same: you provide a set of functions (some of which are optional), which usually are looked for in a //! shared library by the framework. It can be useful in some special scenarios where you want to hijack particular //! interfaces or limited in your ability to produce new shared libraries. //! //! 
It is important that the plugin name provided by @ref PluginRegistrationDesc::onPluginRegisterFn function is //! unique, registration will fail otherwise. //! //! @param clientName The client registering the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin registration description. //! //! @return If registration was successful. bool(CARB_ABI* registerPlugin)(const char* clientName, const PluginRegistrationDesc& desc); //! Try to unregister a plugin. //! //! If plugin is in use, which means one if its interfaces was acquired by someone and not yet released, the //! unregister will fail. Both "dynamic" (shared libraries) and "static" (see @ref Framework::registerPlugin) //! plugins can be unregistered. //! //! @param pluginName The plugin to be unregistered. //! //! @return If unregistration was successful. bool(CARB_ABI* unregisterPlugin)(const char* pluginName); //! The descriptor for registering builtin @ref carb::logging::ILogging interface implementation. const PluginRegistrationDesc&(CARB_ABI* getBuiltinLoggingDesc)(); //! The descriptor for registering builtin @ref carb::filesystem::IFileSystem interface implementation. const PluginRegistrationDesc&(CARB_ABI* getBuiltinFileSystemDesc)(); //! Sets the default plugin to be used when an interface type is acquired. //! //! The mechanism of default interfaces allows @ref Framework to guarantee that every call to //! `acquireInterface<Foo>()` will return the same `Foo` interface pointer for everyone. The only way to bypass it //! is by explicitly passing the `pluginName` of the interface you want to acquire. //! //! It is important to note that if the interface was previously already acquired, the effect of this function won't //! take place until it is released by all holders. So it is recommended to set defaults as early as possible. //! //! @tparam T The interface type. //! @param pluginName The name of the plugin (e.g. 
"carb.profiler-cpu.plugin") that will be set as default. Must not //! be `nullptr`. template <class T> void setDefaultPlugin(const char* pluginName); //! \cond DEV //! Sets the default plugin to be used when the given interface is acquired. //! //! The mechanism of default interfaces allows @ref Framework to guarantee that every call to //! `acquireInterface<Foo>()` will return the same `Foo` interface pointer for everyone. The only way to bypass it //! is by explicitly passing the `pluginName` of the interface you want to acquire. //! //! It is important to note that if the interface was previously already acquired, the effect of this function won't //! take place until it is released by all holders. So it is recommended to set defaults as early as possible. //! //! @param clientName The client registering the plugin. This is used to form a dependency graph between clients. //! Must not be `nullptr`. //! //! @param desc The plugin interface description. //! //! @param pluginName The plugin that will be set as default. Must not be `nullptr`. void(CARB_ABI* setDefaultPluginEx)(const char* clientName, InterfaceDesc desc, const char* pluginName); //! \endcond //! Sets the temporary path where the framework will store data for reloadable plugins. //! //! This function must be called before loading any reloadable plugins. By default @ref Framework creates a //! temporary folder in the executable's folder. //! //! @param tempPath Temporary folder path. void(CARB_ABI* setReloadableTempPath)(const char* tempPath); //! Returns temporary path where the framework will store data for reloadable plugins. //! //! @return Temporary path for reloadable data. The returned memory is valid until the @ref //! Framework::setReloadableTempPath is called or the @ref Framework is destroyed. const char*(CARB_ABI* getReloadableTempPath)(); //! Returns Carbonite version and build information. //! //! The format is: `v{major}.{minor} [{shortgithash} {gitbranch} {isdirty}]` where: //! //! 
- major - `kFrameworkVersion.major` //! - minor - `kFrameworkVersion.minor` //! - shortgithash - output of `git rev-parse --short HEAD` //! - gitbranch - output of `git rev-parse --abbrev-ref HEAD` //! - isdirty - `DIRTY` if `git status --porcelain` is not empty //! //! Examples: //! //! - `v1.0 [56ab220c master]` //! - `v0.2 [f2fc1ba1 dev/mfornander/harden DIRTY]` const char*(CARB_ABI* getBuildInfo)(); //! Checks if the provided plugin interface matches the requirements. //! //! @param interfaceCandidate The interface that was provided by the user. //! //! @return If the interface candidate matches template interface requirements, returns @p interfaceCandidate. //! Otherwise, returns `nullptr`. template <typename T> T* verifyInterface(T* interfaceCandidate); //! Checks if provided plugin interface matches the requirements. //! //! Do not directly use this method. Instead, use @ref Framework::verifyInterface. //! //! @param desc The interface description that sets the compatibility requirements. //! //! @param interfaceCandidate The interface that was provided by the user. //! //! @return if the interface candidate matches @p desc, returns @p interfaceCandidate. Otherwise, returns `nullptr`. void*(CARB_ABI* verifyInterfaceEx)(InterfaceDesc desc, void* interfaceCandidate); //! The descriptor for registering builtin @ref carb::assert::IAssert interface implementation. const PluginRegistrationDesc&(CARB_ABI* getBuiltinAssertDesc)(); //! The descriptor for registering builtin @ref carb::thread::IThreadUtil interface implementation. const PluginRegistrationDesc&(CARB_ABI* getBuiltinThreadUtilDesc)(); //! Load and register a plugin from the given filename. //! //! Call @ref unloadPlugin() to unload the plugin at @p libraryPath. //! //! @param libraryPath Name of the shared library. Must not be `nullptr`. //! //! @param reloadable Treat the plugin as reloadable. //! //! @param unload Grab the list of interfaces from the plugin and then unload it. 
If the user tries to acquire one //! of the retrieved interfaces, the plugin will be lazily reloaded. //! //! @return Returns a non-negative value on success, negative value otherwise. LoadPluginResult(CARB_ABI* loadPlugin)(const char* libraryPath, bool reloadable, bool unload); //! Unloads the plugin at the given shared library path. //! //! @param Path to shared library. Must not be `nullptr`. //! //! @returns Returns `true` if a plugin was loaded at the given path and successfully unloaded. `false` otherwise. bool(CARB_ABI* unloadPlugin)(const char* libraryPath); //! Adds a release hook for either the framework or a specific interface. //! //! A release hook can be added multiple times with the same or different user data, in which case it will be called //! multiple times. It is up to the caller to ensure uniqueness if uniqueness is desired. To remove a release hook, //! call @ref carb::Framework::removeReleaseHook() with the same parameters. //! //! @param iface The interface (returned by @ref carb::Framework::acquireInterface()) to monitor for release. If //! `nullptr` is specified, the release hook will be called when the @ref carb::Framework itself is unloaded. //! //! @param fn The release hook callback function that will be called. Must not be `nullptr`. //! //! @param user Data to be passed to the release hook function. May be `nullptr`. //! //! @returns Returns `true` if the interface was found and the release hook was added successfully; `false` //! otherwise. //! //! @rst //! //! .. danger:: //! //! It is *expressly forbidden* to call back into :cpp:type:`carb::Framework` in any way during the //! :cpp:type:`carb::ReleaseHookFn` callback. Doing so results in undefined behavior. The only exception to this //! rule is calling `removeReleaseHook()`. //! //! @endrst bool(CARB_ABI* addReleaseHook)(void* iface, ReleaseHookFn fn, void* user); //! Removes a release hook previously registered with @ref carb::Framework::addReleaseHook(). //! //! 
The same parameters supplied to @ref carb::Framework::addReleaseHook() must be provided in order to identify the //! correct release hook to remove. It is safe to call this function from within the release hook callback. //! //! @param iface The interface previously passed to @ref addReleaseHook(). //! //! @param fn The function previously passed to @ref addReleaseHook(). //! //! @param user The user data parameter previously passed to @ref addReleaseHook(). //! //! @returns Returns `true` if the release hook was found and removed. If it was not found, `false` is returned. //! //! @rst //! //! .. danger:: //! //! It is *expressly forbidden* to call back into :cpp:type:`carb::Framework` in any way during the //! :cpp:type:`carb::ReleaseHookFn` callback. Doing so results in undefined behavior. The only exception to this //! rule is calling `removeReleaseHook()`. //! //! @endrst bool(CARB_ABI* removeReleaseHook)(void* iface, ReleaseHookFn fn, void* user); //! @private CARB_DEPRECATED("Use carbReallocate() instead") void*(CARB_ABI* internalRealloc)(void* prev, size_t newSize, size_t align); static_assert(kFrameworkVersion.major == 0, "Remove Framework::internalRealloc in next Framework version"); //! Allocates a block of memory. //! //! @note Any plugin (or the executable) may allocate the memory and a different plugin (or the executable) may free //! or reallocate it. //! //! @warning It is undefined behavior to use memory allocated with this function or @ref reallocate() after the //! Carbonite framework has been shut down. //! //! @param size The size of the memory block requested, in bytes. Specifying '0' will return a valid pointer that //! can be passed to @ref free but cannot be used to store any information. //! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less //! than `sizeof(size_t)` are ignored. `0` indicates to use default system alignment (typically //! `2 * sizeof(void*)`). //! 
@returns A non-`nullptr` memory block of @p size bytes with minimum alignment @p align. If an error occurred, //! or memory could not be allocated, `nullptr` is returned. The memory is not initialized. CARB_DEPRECATED("Use carb::allocate() instead") void* allocate(size_t size, size_t align = 0) { return carb::allocate(size, align); } static_assert(kFrameworkVersion.major == 0, "Remove Framework::allocate in next Framework version"); //! Frees a block of memory previously allocated with @ref allocate(). //! //! @note Any plugin (or the executable) may allocate the memory and a different plugin (or the executable) may //! free it. //! //! @param p The block of memory previously returned from @ref allocate() or @ref reallocate(), or `nullptr`. CARB_DEPRECATED("Use carb::deallocate() instead") void free(void* p) { return carb::deallocate(p); } static_assert(kFrameworkVersion.major == 0, "Remove Framework::free and CARB_FREE_UNDEFINED in next Framework version"); //! Reallocates a block of memory previously allocated with @ref allocate(). //! //! This function changes the size of the memory block pointed to by @p p to @p size bytes with @p align alignment. //! The contents are unchanged from the start of the memory block up to the minimum of the old size and @p size. If //! @p size is larger than the old size, the added memory is not initialized. If @p p is `nullptr`, the call is //! equivalent to `allocate(size, align)`; if @p size is `0` and @p p is not `nullptr`, the call is equivalent to //! `free(p)`. Unless @p p is `nullptr`, it must have been retrieved by an earlier call to @ref allocate() or //! @ref reallocate(). If the memory region was moved in order to resize it, @p p will be freed as with `free(p)`. //! //! @note Any plugin (or the executable) may allocate the memory and a different plugin (or the executable) may //! reallocate it. //! //! @warning It is undefined behavior to use memory allocated with this function or @ref allocate() after the //! 
Carbonite framework has been shut down. //! //! @param p The block of memory previously returned from @ref allocate() or @ref reallocate() if resizing is //! resizing is desired. If `nullptr` is passed as this parameter, the call behaves as if //! `allocate(size, align)` was called. //! @param size The size of the memory block requested, in bytes. See above for further explanation. //! @param align The minimum alignment (in bytes) of the memory block requested. Must be a power of two. Values less //! than `sizeof(size_t)` are ignored. Changing the alignment from a previous allocation is undefined behavior. //! `0` indicates to use default system alignment (typically `2 * sizeof(void*)`). //! @returns A pointer to a block of memory of @p size bytes with minimum alignment @p align, unless an error //! occurs in which case `nullptr` is returned. If @p p is `nullptr` and @p size is `0` then `nullptr` is also //! returned. CARB_DEPRECATED("Use carb::reallocate() instead") void* reallocate(void* p, size_t size, size_t align = 0) { return carb::reallocate(p, size, align); } static_assert(kFrameworkVersion.major == 0, "Remove Framework::reallocate in next Framework version"); //! Retrieves the Carbonite SDK version string, //! //! @returns A string describing the current Carbonite SDK version. This will be the same value //! as the @ref CARB_SDK_VERSION value that was set when the SDK was built. //! //! @note This version is intended for use in plugins. Since Carbonite plugins aren't directly //! linked to the `carb` library, access to carbGetSdkVersion() isn't as easy as calling //! a library function. This version just provides access to the same result from a //! location that is better guaranteed accessible to plugins. const char*(CARB_ABI* getSdkVersion)(); //! Adds a load hook that is called when an interface becomes available. //! //! No attempt is made to load the plugin. This can be used as a notification mechanism when a plugin cannot be //! 
loaded immediately (due to circular dependencies for instance) but may be loaded later. To remove the load hook, //! use \ref removeLoadHook(). It is possible to register multiple load hooks with the same parameters, but this is //! not recommended and will cause the function to be called multiple times with the same parameters. //! //! See \ref LoadHookFn for a discussion on how and when load hooks are called. //! //! @see LoadHookFn removeLoadHook() //! @tparam T The interface type //! @param pluginName the name of the specific plugin desired that exposes \c T, or \c nullptr for any plugin. //! @param func the \ref LoadHookFn to call when the given interface becomes available. This function may be called //! multiple times if multiple plugins that expose interface \c T are loaded. //! @param userData application-specific data that is supplied to \p func when it is called. //! @returns A \ref LoadHookHandle uniquely identifying this load hook; \ref kInvalidLoadHook if an error occurs. //! When finished with the load hook, call \ref removeLoadHook(). template <class T> LoadHookHandle addLoadHook(const char* pluginName, LoadHookFn func, void* userData); //! @private LoadHookHandle(CARB_ABI* internalAddLoadHook)( const InterfaceDesc& iface, const char* plugin, const char* clientName, LoadHookFn fn, void* user, bool add); //! Removes a previously-registered load hook. //! //! It is safe to remove the load hook from within the load hook callback. //! //! @param handle The \ref LoadHookHandle returned from \ref addLoadHook(). //! @returns Returns \c true if the load hook was found and removed. If it was not found, \c false is returned. bool(CARB_ABI* removeLoadHook)(LoadHookHandle handle); //! Registers a client as a script binding or script language owner. Typically handled by CARB_BINDINGS(). //! //! This function is used to notify the Carbonite framework of dependencies from a script language. This allows //! proper dependency tracking and shutdown ordering. 
For instance, if a python binding loads an interface from //! *carb.assets.plugin*, it appears to Carbonite that a non-plugin client requested the interface. However, if //! python was started from *carb.scripting-python.plugin*, then it becomes necessary to establish a dependency //! relationship between *carb.scripting-python.plugin* and any plugins loaded from python bindings. This function //! has two purposes in this example: the *carb.scripting-python.plugin* will register itself as //! \ref BindingType::Owner for @p scriptType `python`. All bindings automatically register themselves as //! \ref BindingType::Binding for @p scriptType `python` through `CARB_BINDINGS()`. Whenever the binding acquires an //! interface, all registered \ref BindingType::Owner clients gain a dependency on the acquired interface. //! //! @param type The \ref BindingType of \p clientName. //! @param clientName A plugin or binding's client name (`g_carbClientName` typically created by `CARB_GLOBALS()` or //! `CARB_BINDINGS()`). //! @param scriptType A user-defined script type, such as "python" or "lua". Must match between owner and bindings. //! Not case-sensitive. void(CARB_ABI* registerScriptBinding)(BindingType type, const char* clientName, const char* scriptType); //! The main framework access function for acquiring an interface. //! //! @note This function is generally not intended to be used directly; instead, consider one of the many type-safe //! adapter functions such as \ref tryAcquireInterface(). //! //! @warning This function will be `nullptr` in Carbonite releases prior to 135.0 //! //! @param options The structure containing the options for acquiring the interface. //! @returns The interface pointer for the interface being acquired. May be `nullptr` if the interface could not be //! acquired. Verbose logging will explain the entire acquisition process. Warning and Error logs may be //! produced depending on options. 
void*(CARB_ABI* internalAcquireInterface)(const AcquireInterfaceOptions& options); }; } // namespace carb //! The client's name. //! //! A "client" can be one of the following in the Carbonite framework: //! //! - A plugin. Here the client name is the same as the plugin name. //! //! - An application. //! //! - Scripting bindings. //! //! Clients are uniquely identified by their name. Many functions accept client name as an argument. This allows @ref //! carb::Framework to create a dependency tree of clients. This dependency tree allows the safe unloading of //! plugins. CARB_WEAKLINK CARB_HIDDEN const char* g_carbClientName; //! Defines the client's global @ref carb::Framework pointer. //! //! Do not directly access this pointer. Rather use helper methods like @ref carb::getFramework() and @ref //! carb::isFrameworkValid(). CARB_WEAKLINK CARB_HIDDEN carb::Framework* g_carbFramework; //! Global symbol to enforce the use of CARB_GLOBALS() in Carbonite modules. Do not modify or use //! this value. //! //! If there is an unresolved symbol linker error about this symbol (build time or run time), it //! means that the CARB_GLOBALS() macro was not called at the global scope in the module. This //! exists to ensure that all the global symbols related to each Carbonite module have been //! properly defined and initialized. extern bool g_needToCall_CARB_GLOBALS_atGlobalScope; //! Defines global variables for use by Carbonite. Call this macro from the global namespace. //! //! Do not call this macro directly. Rather: //! //! - For applications, call @ref OMNI_APP_GLOBALS. //! //! - For Carbonite plugins, call @ref CARB_PLUGIN_IMPL. //! //! - For ONI plugins, call @ref OMNI_MODULE_GLOBALS. #define CARB_FRAMEWORK_GLOBALS(clientName) \ CARB_HIDDEN bool g_needToCall_CARB_GLOBALS_atGlobalScope = carb::detail::setClientName(clientName); namespace carb { namespace detail { //! Sets the client name for the calling module. //! //! 
//! @param[in] clientName A string literal containing the name of the calling plugin or
//!                       executable. This string must be guaranteed constant for the
//!                       lifetime of the module.
//! @returns `true`.
//!
//! @note This should not be called directly. This is called as part of CARB_FRAMEWORK_GLOBALS().
inline bool setClientName(const char* clientName)
{
    g_carbClientName = clientName;
    return true;
}
} // namespace detail

//! Gets the Carbonite framework.
//!
//! The @ref carb::Framework can be `nullptr` for applications if it hasn't acquired it (see @ref
//! carb::acquireFramework()). It can also be `nullptr` for a plugin if the plugin is used externally and was not loaded
//! by framework itself.
//!
//! After starting up, @ref carb::getFramework() can be considered a getter for a global singleton that is the @ref
//! carb::Framework.
//!
//! @return The Carbonite framework.
inline Framework* getFramework()
{
    return g_carbFramework;
}

// Thin forwarder: the public inline entry point delegates to the loadPluginsEx
// function-pointer member.
inline void Framework::loadPlugins(const PluginLoadingDesc& desc)
{
    return this->loadPluginsEx(desc);
}

// Type-safe wrapper over verifyInterfaceEx(): derives the interface descriptor
// from T at compile time and casts the verified pointer back to T*.
template <typename T>
T* Framework::verifyInterface(T* interfaceCandidate)
{
    const auto desc = T::getInterfaceDesc();
    return static_cast<T*>(getFramework()->verifyInterfaceEx(desc, interfaceCandidate));
}

// Acquires interface T by plugin name on behalf of the calling client.
// clientName is forwarded only when CARB_GLOBALS was invoked in this module
// (g_needToCall_CARB_GLOBALS_atGlobalScope is set); otherwise nullptr is passed.
// Prefers internalAcquireInterface (nullptr in Carbonite releases prior to
// 135.0 -- see its declaration) and falls back to the legacy
// acquireInterfaceWithClient entry point.
template <typename T>
T* Framework::acquireInterface(const char* pluginName)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    if (this->internalAcquireInterface)
        return static_cast<T*>(this->internalAcquireInterface(
            { sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(), eAIFDefaultType, pluginName }));
    else
        return static_cast<T*>(this->acquireInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginName));
}

// Optional variant of acquireInterface(const char*): adds fAIFOptional to the
// acquisition flags (or uses the legacy tryAcquireInterfaceWithClient fallback).
template <typename T>
T* Framework::tryAcquireInterface(const char* pluginName)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    if (this->internalAcquireInterface)
        return static_cast<T*>(
            this->internalAcquireInterface({ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
                                             AcquireInterfaceFlags(eAIFDefaultType | fAIFOptional), pluginName }));
    else
        return static_cast<T*>(this->tryAcquireInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginName));
}

// Acquires interface T identified via another interface pointer
// (eAIFFromInterfaceType); the payload slot of the options aggregate carries
// pluginInterface instead of a plugin name.
template <typename T>
T* Framework::acquireInterface(const void* pluginInterface)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    if (this->internalAcquireInterface)
        return static_cast<T*>(
            this->internalAcquireInterface({ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
                                             eAIFFromInterfaceType, pluginInterface }));
    else
        return static_cast<T*>(
            this->acquireInterfaceFromInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginInterface));
}

// Optional (fAIFOptional) variant of acquireInterface(const void*).
template <typename T>
T* Framework::tryAcquireInterface(const void* pluginInterface)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    if (this->internalAcquireInterface)
        return static_cast<T*>(this->internalAcquireInterface(
            { sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
              AcquireInterfaceFlags(eAIFFromInterfaceType | fAIFOptional), pluginInterface }));
    else
        return static_cast<T*>(
            this->tryAcquireInterfaceFromInterfaceWithClient(clientName, T::getInterfaceDesc(), pluginInterface));
}

// Acquires interface T from the shared library at libraryPath
// (eAIFFromLibraryType).
template <typename T>
T* Framework::acquireInterfaceFromLibrary(const char* libraryPath)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    if (this->internalAcquireInterface)
        return static_cast<T*>(this->internalAcquireInterface(
            { sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(), eAIFFromLibraryType, libraryPath }));
    else
        return static_cast<T*>(
            this->acquireInterfaceFromLibraryWithClient(clientName, T::getInterfaceDesc(), libraryPath));
}

// Optional (fAIFOptional) variant of acquireInterfaceFromLibrary().
template <typename T>
T* Framework::tryAcquireInterfaceFromLibrary(const char* libraryPath)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    if (this->internalAcquireInterface)
        return static_cast<T*>(
            this->internalAcquireInterface({ sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
                                             AcquireInterfaceFlags(eAIFFromLibraryType | fAIFOptional), libraryPath }));
    else
        return static_cast<T*>(
            this->tryAcquireInterfaceFromLibraryWithClient(clientName, T::getInterfaceDesc(), libraryPath));
}

// Acquires T with fAIFNoInitialize set (presumably acquiring without triggering
// plugin initialization -- see the flag's own documentation). There is no legacy
// fallback: when internalAcquireInterface is unavailable (pre-135.0 Carbonite)
// this returns nullptr.
template <typename T>
T* Framework::tryAcquireExistingInterface(const char* pluginName)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    return this->internalAcquireInterface ?
               static_cast<T*>(this->internalAcquireInterface(
                   { sizeof(AcquireInterfaceOptions), clientName, T::getInterfaceDesc(),
                     AcquireInterfaceFlags(eAIFDefaultType | fAIFOptional | fAIFNoInitialize), pluginName })) :
               nullptr;
}

// Counts plugins exposing interface T; see getInterfacesCountEx().
template <typename T>
uint32_t Framework::getInterfacesCount()
{
    const InterfaceDesc desc = T::getInterfaceDesc();
    return this->getInterfacesCountEx(desc);
}

// Fills the caller-preallocated (and pre-zeroed -- see the
// acquireInterfacesWithClient declaration) array with acquired instances of T.
template <typename T>
void Framework::acquireInterfaces(T** interfaces, uint32_t interfacesSize)
{
    const InterfaceDesc desc = T::getInterfaceDesc();
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    this->acquireInterfacesWithClient(clientName, desc, reinterpret_cast<void**>(interfaces), interfacesSize);
}

// Releases a previously acquired interface on behalf of the calling client.
template <typename T>
void Framework::releaseInterface(T* pluginInterface)
{
    (void)(T::getInterfaceDesc()); // Compile-time check that the type is plugin interface
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    this->releaseInterfaceWithClient(clientName, pluginInterface);
}

// Selects which plugin satisfies future acquisitions of interface T.
template <typename T>
void Framework::setDefaultPlugin(const char* pluginName)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    this->setDefaultPluginEx(clientName, T::getInterfaceDesc(), pluginName);
}

// Registers a load hook for interface T; the trailing 'true' selects the "add"
// operation of internalAddLoadHook.
template <typename T>
LoadHookHandle Framework::addLoadHook(const char* pluginName, LoadHookFn func, void* user)
{
    const char* clientName = g_needToCall_CARB_GLOBALS_atGlobalScope ? g_carbClientName : nullptr;
    return this->internalAddLoadHook(T::getInterfaceDesc(), pluginName, clientName, func, user, true);
}

} // namespace carb

#ifdef CARB_FREE_UNDEFINED
#    pragma pop_macro("free")
#    undef CARB_FREE_UNDEFINED
#endif
omniverse-code/kit/include/carb/BindingsPythonUtils.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindingsUtils.h" #include "IObject.h" #include "cpp/TypeTraits.h" #include "cpp/Functional.h" // Python uses these in modsupport.h, so undefine them now #pragma push_macro("min") #undef min #pragma push_macro("max") #undef max CARB_IGNOREWARNING_MSC_WITH_PUSH(4668) // 'X' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif' #include <pybind11/chrono.h> #include <pybind11/functional.h> #include <pybind11/pybind11.h> #include <pybind11/stl.h> CARB_IGNOREWARNING_MSC_POP #pragma pop_macro("min") #pragma pop_macro("max") namespace py = pybind11; PYBIND11_DECLARE_HOLDER_TYPE(T, carb::ObjectPtr<T>, true); // Provide simple implementations of types used in multiple bindings. namespace carb { template <typename InterfaceType, typename ReturnType, typename... Args> auto wrapInterfaceFunctionReleaseGIL(ReturnType (*InterfaceType::*p)(Args...)) -> std::function<ReturnType(InterfaceType&, Args...)> { return [p](InterfaceType& c, Args... args) { py::gil_scoped_release g; return (c.*p)(args...); }; } template <typename InterfaceType, typename ReturnType, typename... Args> auto wrapInterfaceFunctionReleaseGIL(const InterfaceType* c, ReturnType (*InterfaceType::*p)(Args...)) -> std::function<ReturnType(Args...)> { return [c, p](Args... args) { py::gil_scoped_release g; return (c->*p)(args...); }; } template <typename InterfaceType, typename... 
PyClassArgs> py::class_<InterfaceType, PyClassArgs...> defineInterfaceClass(py::module& m, const char* className, const char* acquireFuncName, const char* releaseFuncName = nullptr, const char* classDocstring = nullptr) { auto cls = classDocstring ? py::class_<InterfaceType, PyClassArgs...>(m, className, classDocstring) : py::class_<InterfaceType, PyClassArgs...>(m, className); m.def(acquireFuncName, [](const char* pluginName, const char* libraryPath) { return libraryPath ? acquireInterfaceFromLibraryForBindings<InterfaceType>(libraryPath) : acquireInterfaceForBindings<InterfaceType>(pluginName); }, py::arg("plugin_name") = nullptr, py::arg("library_path") = nullptr, py::return_value_policy::reference); if (releaseFuncName) { m.def(releaseFuncName, [](InterfaceType* iface) { carb::getFramework()->releaseInterface(iface); }); } return cls; } /** * Assuming std::function will call into python code this function makes it safe. * It wraps it into try/catch, acquires GIL lock and log errors. */ template <typename Sig, typename... ArgsT> auto callPythonCodeSafe(const std::function<Sig>& fn, ArgsT&&... args) { using ReturnT = cpp::invoke_result_t<decltype(fn), ArgsT...>; try { if (fn) { py::gil_scoped_acquire gilLock; return fn(std::forward<ArgsT>(args)...); } } catch (const py::error_already_set& e) { CARB_LOG_ERROR("%s", e.what()); } catch (const std::runtime_error& e) { CARB_LOG_ERROR("%s", e.what()); } return ReturnT(); } /** * Helper class implement scripting callbacks. * It extends ScriptCallbackRegistry to provide facility to make safe calls of python callback. It adds GIL lock and * error handling. ScriptCallbackRegistryPython::call can be passed into C API as C function, as long as FuncT* is * passed into as userData. */ template <class KeyT, typename ReturnT, typename... 
Args> class ScriptCallbackRegistryPython : public ScriptCallbackRegistry<KeyT, ReturnT, Args...> { public: using typename ScriptCallbackRegistry<KeyT, ReturnT, Args...>::FuncT; static ReturnT call(Args... args, void* userData) { return callTyped((FuncT*)userData, std::forward<Args>(args)...); } static ReturnT callTyped(FuncT* f, Args&&... args) { return callPythonCodeSafe(*f, std::forward<Args>(args)...); } }; /** * Holds subscription for python in RAII way. Unsubscribe function is called when destroyed. */ class Subscription { public: template <class Unsubscribe> explicit Subscription(Unsubscribe&& unsubscribe) : m_unsubscribeFn(std::forward<Unsubscribe>(unsubscribe)) { } void unsubscribe() { if (m_unsubscribeFn) { m_unsubscribeFn(); m_unsubscribeFn = nullptr; } } ~Subscription() { unsubscribe(); } private: std::function<void()> m_unsubscribeFn; }; template <class Ret, class... Args> class PyAdapter { using Function = std::function<Ret(Args...)>; Function m_func; struct ScopedDestroy { PyAdapter* m_callable; ScopedDestroy(PyAdapter* callable) : m_callable(callable) { } ~ScopedDestroy() { delete m_callable; } }; public: PyAdapter(Function&& func) : m_func(std::move(func)) { } template <class... Args2> auto call(Args2&&... args) { using ReturnType = cpp::invoke_result_t<Function, Args2...>; try { py::gil_scoped_acquire gil; if (m_func) { return cpp::invoke(std::move(m_func), std::forward<Args2>(args)...); } } catch (const py::error_already_set& e) { CARB_LOG_ERROR("%s", e.what()); } catch (const std::runtime_error& e) { CARB_LOG_ERROR("%s", e.what()); } py::gil_scoped_acquire gil; // Hold the GIL while constructing whatever return type return ReturnType(); } // Direct adapter to Carbonite callback when userData is the last argument, the PyAdapter* is the userdata, and // multiple calls to this adapter are desired. The adapter must be deleted with `delete` or `destroy()` later. static auto adaptCallAndKeep(Args... 
args, void* user) { return static_cast<PyAdapter*>(user)->call(std::forward<Args>(args)...); } // Direct adapter to Carbonite callback when userData is the last argument, the PyAdapter* is the userdata, and // there will be only one call to the adapter. static auto adaptCallAndDestroy(Args... args, void* user) { PyAdapter* callable = static_cast<PyAdapter*>(user); ScopedDestroy scopedDestroy(callable); return callable->call(std::forward<Args>(args)...); } // Call the adapter with perfect forwarding and keep the adapter around for future calls. template <class... Args2> static auto callAndKeep(void* user, Args2&&... args) { return static_cast<PyAdapter*>(user)->call(std::forward<Args2>(args)...); } // Call the adapter with perfect forwarding and destroy the adapter. template <class... Args2> static auto callAndDestroy(void* user, Args2&&... args) { PyAdapter* callable = static_cast<PyAdapter*>(user); ScopedDestroy scopedDestroy(callable); return callable->call(std::forward<Args2>(args)...); } static void destroy(void* user) { delete static_cast<PyAdapter*>(user); } }; template <class Ret, class... Args> std::unique_ptr<PyAdapter<Ret, Args...>> createPyAdapter(std::function<Ret(Args...)>&& func) { return std::make_unique<PyAdapter<Ret, Args...>>(std::move(func)); } template <class Callback, class Subscribe, class Unsubscribe> std::shared_ptr<Subscription> createPySubscription(Callback&& func, Subscribe&& subscribe, Unsubscribe&& unsub) { auto callable = createPyAdapter(std::forward<Callback>(func)); using Callable = typename decltype(callable)::element_type; auto&& id = subscribe(Callable::adaptCallAndKeep, callable.get()); return std::make_shared<Subscription>( [unsub = std::forward<Unsubscribe>(unsub), id = std::move(id), callable = callable.release()] { unsub(id); delete callable; }); } /** * Set of helpers to pass std::function (from python bindings) in Carbonite interfaces. 
* Deprecated: use PyAdapter instead via createPyAdapter()/createPySubscription() */ template <typename ReturnT, typename... ArgsT> class FuncUtils { public: using StdFuncT = std::function<ReturnT(ArgsT...)>; using CallbackT = ReturnT (*)(ArgsT..., void*); static ReturnT callPythonCodeSafe(const std::function<ReturnT(ArgsT...)>& fn, ArgsT... args) { return carb::callPythonCodeSafe(fn, args...); } static ReturnT callbackWithUserData(ArgsT... args, void* userData) { StdFuncT* fn = (StdFuncT*)userData; if (fn) return callPythonCodeSafe(*fn, args...); else return ReturnT(); } static StdFuncT* createStdFuncCopy(const StdFuncT& fn) { return new StdFuncT(fn); } static void destroyStdFuncCopy(StdFuncT* fn) { delete fn; } /** * If you have std::function which calls into python code and an interface with pair of subscribe/unsubscribe * functions, this function: * 1. Prolong lifetime of std::function (and thus python callable) by making copy of it on heap. * 2. Subscribes to interface C-style subscribe function by passing this std::function as void* userData (and * calling it back safely) * 3. Wraps subscription id into Subscription class returned to python. Which holds subscription and * automatically unsubscribes when dead. */ template <class SubscriptionT> static std::shared_ptr<Subscription> buildSubscription(const StdFuncT& fn, SubscriptionT (*subscribeFn)(CallbackT, void*), void (*unsubscribeFn)(SubscriptionT)) { StdFuncT* funcCopy = new StdFuncT(fn); auto id = subscribeFn(callbackWithUserData, funcCopy); auto subscription = std::make_shared<Subscription>([=]() { unsubscribeFn(id); delete funcCopy; }); return subscription; } }; template <class T> struct StdFuncUtils; template <class R, class... Args> struct StdFuncUtils<std::function<R(Args...)>> : public FuncUtils<R, Args...> { }; template <class R, class... Args> struct StdFuncUtils<const std::function<R(Args...)>> : public FuncUtils<R, Args...> { }; template <class R, class... 
Args> struct StdFuncUtils<const std::function<R(Args...)>&> : public FuncUtils<R, Args...> { }; /** * Helper to wrap function that returns `IObject*` into the same function that returns stolen ObjectPtr<IObject> holder */ template <typename ReturnT, typename... Args> std::function<ReturnT(Args...)> wrapPythonCallback(std::function<ReturnT(Args...)>&& c) { return [c = std::move(c)](Args... args) -> ReturnT { return callPythonCodeSafe(c, std::forward<Args>(args)...); }; } } // namespace carb #ifdef DOXYGEN_BUILD /** * Macro that allows disabling pybind's use of RTTI to perform duck typing. * * Given a pointer, pybind uses RTTI to figure out the actual type of the pointer (e.g. given an `IObject*`, RTTI can be * used to figure out the pointer is really an `IWindow*`). once pybind knows the "real" type, is generates a PyObject * that contains wrappers for all of the "real" types methods. * * Unfortunately, RTTI is compiler dependent (not @rstref{ABI-safe <abi-compatibility>}) and we've disabled it in much * of our code. * * The `polymorphic_type_hook` specializations generated by this macro disables pybind from using RTTI to find the * "real" type of a pointer. this mean that when using our bindings in Python, you have to "cast" objects to access a * given interface. For example: * ```python * obj = func_that_returns_iobject() * win = IWindow(obj) # a cast. None is returned if the cast fails. * if win: * win->title = "hi" * ``` * * As an aside, since implementations can implement multiple interfaces and the actual implementations are hidden to * pybind (we create bindings for interfaces not implementations), the pybind "duck" typing approach was never going to * work for us. Said differently, some sort of "cast to this interface" was inevitable. * @param TYPE The type to disable Pythonic dynamic casting for. 
*/ # define DISABLE_PYBIND11_DYNAMIC_CAST(TYPE) #else # define DISABLE_PYBIND11_DYNAMIC_CAST(TYPE) \ namespace pybind11 \ { \ template <> \ struct polymorphic_type_hook<TYPE> \ { \ static const void* get(const TYPE* src, const std::type_info*&) \ { \ return src; \ } \ }; \ template <typename itype> \ struct polymorphic_type_hook< \ itype, \ detail::enable_if_t<std::is_base_of<TYPE, itype>::value && !std::is_same<TYPE, itype>::value>> \ { \ static const void* get(const TYPE* src, const std::type_info*&) \ { \ return src; \ } \ }; \ } #endif DISABLE_PYBIND11_DYNAMIC_CAST(carb::IObject)
omniverse-code/kit/include/carb/RenderingTypes.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "Types.h"

namespace carb
{

/**
 * Defines a resource format.
 *
 * NOTE(review): the enumerator naming follows the usual graphics convention
 * e<channels><bits>_<numeric-type> (e.g. eRGBA8_UNORM = four 8-bit
 * unsigned-normalized channels); inferred from the names themselves — confirm
 * against the renderer's format tables before relying on exact semantics.
 */
enum class Format
{
    eUnknown,
    eR8_UNORM,
    eR8_SNORM,
    eR8_UINT,
    eR8_SINT,
    eRG8_UNORM,
    eRG8_SNORM,
    eRG8_UINT,
    eRG8_SINT,
    eBGRA8_UNORM,
    eBGRA8_SRGB,
    eRGBA8_UNORM,
    eRGBA8_SNORM,
    eRGBA8_UINT,
    eRGBA8_SINT,
    eRGBA8_SRGB,
    eR16_UNORM,
    eR16_SNORM,
    eR16_UINT,
    eR16_SINT,
    eR16_SFLOAT,
    eRG16_UNORM,
    eRG16_SNORM,
    eRG16_UINT,
    eRG16_SINT,
    eRG16_SFLOAT,
    eRGBA16_UNORM,
    eRGBA16_SNORM,
    eRGBA16_UINT,
    eRGBA16_SINT,
    eRGBA16_SFLOAT,
    eR32_UINT,
    eR32_SINT,
    eR32_SFLOAT,
    eRG32_UINT,
    eRG32_SINT,
    eRG32_SFLOAT,
    eRGB32_UINT,
    eRGB32_SINT,
    eRGB32_SFLOAT,
    eRGBA32_UINT,
    eRGBA32_SINT,
    eRGBA32_SFLOAT,
    eR10_G10_B10_A2_UNORM,
    eR10_G10_B10_A2_UINT,
    eR11_G11_B10_UFLOAT,
    eR9_G9_B9_E5_UFLOAT,
    eB5_G6_R5_UNORM,
    eB5_G5_R5_A1_UNORM,
    eBC1_RGBA_UNORM,
    eBC1_RGBA_SRGB,
    eBC2_RGBA_UNORM,
    eBC2_RGBA_SRGB,
    eBC3_RGBA_UNORM,
    eBC3_RGBA_SRGB,
    eBC4_R_UNORM,
    eBC4_R_SNORM,
    eBC5_RG_UNORM,
    eBC5_RG_SNORM,
    eBC6H_RGB_UFLOAT,
    eBC6H_RGB_SFLOAT,
    eBC7_RGBA_UNORM,
    eBC7_RGBA_SRGB,
    eD16_UNORM,
    eD24_UNORM_S8_UINT,
    eD32_SFLOAT,
    eD32_SFLOAT_S8_UINT_X24,

    // Formats for depth-stencil views
    eR24_UNORM_X8,
    eX24_R8_UINT,
    eX32_R8_UINT_X24,
    eR32_SFLOAT_X8_X24,

    // Formats for sampler-feedback
    eSAMPLER_FEEDBACK_MIN_MIP,
    eSAMPLER_FEEDBACK_MIP_REGION_USED,

    // Little-Endian Formats
    eABGR8_UNORM,
    eABGR8_SRGB,

    // Must be last
    eCount //!< Number of format entries; not itself a valid format.
};

/**
 * Defines a sampling count for a resource.
 *
 * Each enumerator names a power-of-two sample count (1x .. 64x).
 */
enum class SampleCount
{
    e1x,
    e2x,
    e4x,
    e8x,
    e16x,
    e32x,
    e64x
};

/**
 * Defines the presentation mode for the rendering system.
 */
enum class PresentMode : uint8_t
{
    eNoTearing, //!< No tearing.
    eAllowTearing //!< Allow tearing.
};

/**
 * Defines a descriptor for clearing color values.
 *
 * A union: exactly one member is meaningful at a time, selected by the
 * format the clear is applied to (float, unsigned-integer, or signed-integer).
 */
union ClearColorValueDesc
{
    Color<float> rgba32f; //!< Clear color as four 32-bit floats.
    Color<uint32_t> rgba32ui; //!< Clear color as four 32-bit unsigned integers.
    Color<int32_t> rgba32i; //!< Clear color as four 32-bit signed integers.
};

/**
 * Defines a descriptor for clearing depth-stencil values.
 */
struct ClearDepthStencilValueDesc
{
    float depth; //!< Depth clear value.
    uint32_t stencil; //!< Stencil clear value.
};

/**
 * Describes how texture data should be interpreted with respect to gamma.
 */
enum class TextureGamma
{
    eDefault, ///< treat as linear for HDR formats, as sRGB for LDR formats (use e*_SRGB tex format or convert on load)
    eLinear, ///< treat as linear, leaves data unchanged
    eSRGB, ///< treat as sRGB, (use e*_SRGB texture format or convert on load)
    eCount ///< Number of entries; not itself a valid gamma mode.
};

} // namespace carb
omniverse-code/kit/include/carb/FrameworkUtils.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "Framework.h" #include "extras/Path.h" #include <string> #include <unordered_set> #include <vector> namespace carb { /** * Get all registered plugins and collect folders they are located in. */ inline std::unordered_set<std::string> getPluginFolders() { Framework* framework = carb::getFramework(); std::vector<PluginDesc> plugins(framework->getPluginCount()); framework->getPlugins(plugins.data()); std::unordered_set<std::string> folders; for (const auto& desc : plugins) { extras::Path p(desc.libPath); const std::string& folder = p.getParent(); if (!folder.empty()) { folders.insert(folder); } } return folders; } } // namespace carb
omniverse-code/kit/include/carb/PluginUtils.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Utilities to ease the creation of Carbonite plugins. #pragma once #include "ClientUtils.h" #include "PluginInitializers.h" #include "PluginCoreUtils.h" namespace omni { namespace structuredlog { void addModulesSchemas() noexcept; } } // namespace omni //! Plugin helper macro to define boiler-plate code to register and unregister the plugin with various other components //! in the system (e.g. logging channels, profiler, localization, etc.). //! //! Do not directly call this macro, rather call @ref CARB_PLUGIN_IMPL() which will call this macro for you. #define CARB_DEFAULT_INITIALIZERS() \ CARB_EXPORT void carbOnPluginPreStartup() \ { \ carb::pluginInitialize(); \ omni::structuredlog::addModulesSchemas(); \ } \ \ CARB_EXPORT void carbOnPluginPostShutdown() \ { \ carb::pluginDeinitialize(); \ } //! Main macro to declare a plugin implementation where multiple interface versions are not required. //! //! Authors of Carbonite plugins must use this macro in exactly one compilation unit for their plugin to generate code //! expected by the Carbonite framework. //! //! @note Carbonite plugins can provide multiple versions of an interface to remain backwards compatible with apps and //! modules that are built against earlier versions of plugins. In order to do this, see \ref CARB_PLUGIN_IMPL_EX. //! //! In particular, this macro: //! //! - Defines global variables, such as @ref g_carbFramework. //! //! - Registers a default logging channel with @ref omni::log::ILog. //! //! 
- Adds boiler-plate code for @oni_overview interop. //! //! - Adds boiler-plate code for plugin startup, shutdown, and registration. (See @carb_framework_overview for more //! information). //! //! This macro must be used in the global namespace. A @ref carb::PluginImplDesc must be provided as well as all //! interfaces exported by this plugin. Each interface must be declared with @ref CARB_PLUGIN_INTERFACE. There must also //! exist a @ref fillInterface(InterfaceType&) function for each interface type that is exported by this plugin. //! A trailing semicolon is optional. //! //! Example: //! @code{.cpp} //! // Plugin Implementation Descriptor //! const carb::PluginImplDesc kPluginImpl{ "carb.windowing-glfw.plugin", "Windowing (glfw).", "NVIDIA", //! carb::PluginHotReload::eDisabled, "dev" }; //! //! // Generate boilerplate code //! CARB_PLUGIN_IMPL(kPluginImpl, carb::windowing::IWindowing, carb::windowing::IGLContext) //! //! // Construct the carb::windowing::IWindowing interface //! void fillInterface(carb::windowing::IWindowing& iface) { /* ... */ } //! //! // Construct the carb::windowing::IGLContext interface //! void fillInterface(carb::windowing::IGLContext& iface) { /* ... */ } //! @endcode //! //! See @carb_framework_overview and @carb_interfaces for more information on creating Carbonite plugins. //! //! @param impl The @ref carb::PluginImplDesc constant to be used as plugin description. //! //! @param ... One or more interface types to be implemented by the plugin. An interface is a `struct` or `class` with //! a use of @ref CARB_PLUGIN_INTERFACE() inside it. These interface types are constructed by a global function //! @ref fillInterface(InterfaceType&) that must exist in the plugin. See @ref fillInterface(InterfaceType&) for more //! information about interface construction and destruction. #define CARB_PLUGIN_IMPL(impl, ...) 
\ CARB_GLOBALS_EX(impl.name, impl.description) \ OMNI_MODULE_GLOBALS_FOR_PLUGIN() \ CARB_PLUGIN_IMPL_WITH_INIT_0_5(impl, __VA_ARGS__) /* for backwards compatibility */ \ CARB_PLUGIN_IMPL_WITH_INIT(impl, __VA_ARGS__) \ CARB_DEFAULT_INITIALIZERS() //! Main macro to declare a plugin implementation where multiple interface versions are required. //! //! Authors of Carbonite plugins must use this macro in exactly one compilation unit for their plugin to generate code //! expected by the Carbonite framework. //! //! @note This implementation macro allows Carbonite plugins to provide multiple versions of an interface in order to //! remain backwards compatible with apps and modules that are built against earlier versions of plugins. Every //! interface exported by the plugin must have a @ref fillInterface(carb::Version*, void*) function. //! //! In particular, this macro: //! //! - Defines global variables, such as @ref g_carbFramework. //! //! - Registers a default logging channel with @ref omni::log::ILog. //! //! - Adds boiler-plate code for @oni_overview interop. //! //! - Adds boiler-plate code for plugin startup, shutdown, and registration. (See @carb_framework_overview for more //! information). //! //! This macro must be used in the global namespace. A @ref carb::PluginImplDesc must be provided as well as all //! interfaces exported by this plugin. Each interface must be declared with @ref CARB_PLUGIN_INTERFACE. There must also //! exist a @ref fillInterface(carb::Version*, void*) function for each interface type that is exported by this plugin. //! A trailing semicolon is optional. //! //! Example: //! @code{.cpp} //! // Plugin Implementation Descriptor //! const carb::PluginImplDesc kPluginImpl{ "carb.windowing-glfw.plugin", "Windowing (glfw).", "NVIDIA", //! carb::PluginHotReload::eDisabled, "dev" }; //! //! // Generate boilerplate code //! CARB_PLUGIN_IMPL_EX(kPluginImpl, carb::windowing::IWindowing, carb::windowing::IGLContext) //! //! 
// Construct the carb::windowing::IWindowing interface //! template <> void fillInterface<carb::windowing::IWindowing>(carb::Version* v, void* iface) { /* ... */ } //! //! // Construct the carb::windowing::IGLContext interface //! template <> void fillInterface<carb::windowing::IGLContext>(carb::Version* v, void* iface) { /* ... */ } //! @endcode //! //! See @carb_framework_overview and @carb_interfaces for more information on creating Carbonite plugins. //! //! @param impl The @ref carb::PluginImplDesc constant to be used as plugin description. //! //! @param ... One or more interface types to be implemented by the plugin. An interface is a `struct` or `class` with //! a use of @ref CARB_PLUGIN_INTERFACE() inside it. These interface types are constructed by a global explicitly- //! specialized template function @ref fillInterface(carb::Version*, void*) that must exist in the plugin. See //! @ref fillInterface(carb::Version*, void*) for more information about interface construction and destruction. #define CARB_PLUGIN_IMPL_EX(impl, ...) \ CARB_GLOBALS_EX(impl.name, impl.description) \ OMNI_MODULE_GLOBALS_FOR_PLUGIN() \ CARB_PLUGIN_IMPL_WITH_INIT_EX(impl, __VA_ARGS__) \ CARB_PLUGIN_IMPL_WITH_INIT_0_5_EX(impl, __VA_ARGS__) /* for backwards compatibility */ \ CARB_DEFAULT_INITIALIZERS() /** * Macros to declare a plugin implementation dependencies. * * If a plugin lists an interface "A" as dependency it is guaranteed that `carb::Framework::acquireInterface<A>()` call * will return it, otherwise it can return `nullptr`. The Framework checks and resolves all dependencies before loading * the plugin. If the dependency cannot be loaded (i.e. no plugin satisfies the interface, or a circular load is * discovered) then the plugin will fail to load and `nullptr` will be returned from the * carb::Framework::acquireInterface() function. * * @note Circular dependencies can exist as long as they are not stated in the CARB_PLUGIN_IMPL_DEPS() macros. 
For * instance, assume plugins *Alpha*, *Beta*, and *Gamma*. *Alpha* is dependent on *Beta*; *Beta* is dependent on * *Gamma*. *Gamma* is dependent on *Alpha*, but cannot list *Alpha* in its CARB_PLUGIN_IMPL_DEPS() macro, nor * attempt to acquire and use it in *Gamma*'s carbOnPluginStartup() function. At a later point from within *Gamma*, the * desired interface from *Alpha* may be acquired and used. However, in terms of unload order, *Alpha* will be unloaded * first, followed by *Beta* and finally *Gamma*. In this case the *Gamma* carbOnPluginShutdown() function must account * for the fact that *Alpha* will already be unloaded. * * @param ... One or more interface types (e.g. `carb::settings::ISettings`) to list as dependencies for this plugin. */ #define CARB_PLUGIN_IMPL_DEPS(...) \ template <typename... Types> \ static void getPluginDepsTyped(struct carb::InterfaceDesc** deps, size_t* count) \ { \ static carb::InterfaceDesc depends[] = { Types::getInterfaceDesc()... }; \ *deps = depends; \ *count = sizeof(depends) / sizeof(depends[0]); \ } \ \ CARB_EXPORT void carbGetPluginDeps(struct carb::InterfaceDesc** deps, size_t* count) \ { \ getPluginDepsTyped<__VA_ARGS__>(deps, count); \ } /** * Macro to declare a plugin without dependencies. * * Calling this macro is not required if there are no dependencies. This macro exists to make your plugin more * readable. */ #define CARB_PLUGIN_IMPL_NO_DEPS() \ CARB_EXPORT void carbGetPluginDeps(struct carb::InterfaceDesc** deps, size_t* count) \ { \ *deps = nullptr; \ *count = 0; \ } /** * Macro to declare a "minimal" plugin. * * Plugins in the Carbonite ecosystem tend to depend on other plugins. For example, plugins often want to access * Carbonite's logging system via @ref carb::logging::ILogging. When calling @ref CARB_PLUGIN_IMPL, boiler-plate code * is injected to ensure the plugin can use these "common" plugins. * * This macro avoids taking dependencies on these "common" plugins. 
When calling this macro, only the "minimal" boiler * plate code is generated in order for the plugin to work. It's up to the developer to add additional code to make the * plugin compatible with any desired "common" plugin. * * Use of this macro is rare in Omniverse. */ #define CARB_PLUGIN_IMPL_MINIMAL(impl, ...) \ CARB_FRAMEWORK_GLOBALS(kPluginImpl.name) \ CARB_PLUGIN_IMPL_WITH_INIT(impl, __VA_ARGS__)
omniverse-code/kit/include/carb/Version.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Utilities for Carbonite version.
#pragma once

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <type_traits>

// Note: Ideally this would be in Defines.h, but there is a weird circular dependency:
// Defines.h -> assert/IAssert.h -> Interface.h -> Version.h

//! A macro to ensure interop safety by assertion
//!
//! To be @rstref{interop safe <abi-compatibility>} a type must be
//! <a href="https://en.cppreference.com/w/cpp/named_req/TriviallyCopyable">trivially-copyable</a> and conform to
//! <a href="https://en.cppreference.com/w/cpp/named_req/StandardLayoutType">StandardLayoutType</a>.
//! @param ... The Type to check
#define CARB_ASSERT_INTEROP_SAFE(...)                                                                                  \
    static_assert(std::is_standard_layout<__VA_ARGS__>::value, "Must have standard layout to be interop safe");        \
    static_assert(std::is_trivially_copyable<__VA_ARGS__>::value, "Must be trivially copyable to be interop safe")

namespace carb
{

/**
 * Defines a version consisting of a major and minor version.
 */
struct Version
{
    uint32_t major; //!< The major version.
    uint32_t minor; //!< The minor version.
};
CARB_ASSERT_INTEROP_SAFE(Version);

/**
 * Less-than comparison operator.
 *
 * Orders first by major version, then (for equal majors) by minor version.
 *
 * @note The two components compare independently as numbers, so
 * `Version{ 1, 9 } < Version{ 1, 11 }` is `true` even though "1.11" < "1.9" as decimals.
 * @param lhs The version on the left side of the operation
 * @param rhs The version on the right side of the operation
 * @returns `true` if \p lhs is a lower version than \p rhs; `false` otherwise.
 */
constexpr bool operator<(const Version& lhs, const Version& rhs) noexcept
{
    return (lhs.major != rhs.major) ? (lhs.major < rhs.major) : (lhs.minor < rhs.minor);
}

/**
 * Less-than-or-equal comparison operator.
 *
 * Orders first by major version, then (for equal majors) by minor version.
 *
 * @param lhs The version on the left side of the operation
 * @param rhs The version on the right side of the operation
 * @returns `true` if \p lhs is a version that is lower than or equal to \p rhs; `false` otherwise.
 */
constexpr bool operator<=(const Version& lhs, const Version& rhs) noexcept
{
    return (lhs.major != rhs.major) ? (lhs.major < rhs.major) : (lhs.minor <= rhs.minor);
}

/**
 * Equality operator.
 *
 * @param lhs The version on the left side of the operation
 * @param rhs The version on the right side of the operation
 * @returns `true` if \p lhs is equal to \p rhs; `false` otherwise.
 */
constexpr bool operator==(const Version& lhs, const Version& rhs) noexcept
{
    return lhs.major == rhs.major && lhs.minor == rhs.minor;
}

/**
 * Inequality operator.
 *
 * @param lhs The version on the left side of the operation
 * @param rhs The version on the right side of the operation
 * @returns `true` if \p lhs is not equal to \p rhs; `false` otherwise.
 */
constexpr bool operator!=(const Version& lhs, const Version& rhs) noexcept
{
    return lhs.major != rhs.major || lhs.minor != rhs.minor;
}

/**
 * Checks two versions to see if they are semantically compatible.
 *
 * For more information on semantic versioning, see https://semver.org/.
 *
 * @warning A major version of `0` is considered the "development/experimental" series: `0.x`
 * minor versions may be, but are not required to be, compatible with each other. This function
 * still reports different `0.x`/`0.y` versions as compatible, but emits a warning to `stderr`
 * when a \p name is supplied and \p candidate has a higher minor version than \p minimum.
 *
 * @param name An optional name that, if provided, enables the `stderr` warning for the `0.x`
 * case described above.
 * @param minimum The minimum version required. This is typically the version being tested.
 * @param candidate The version offered. This is typically the version being tested against.
 * @retval true If both share the same major version and \p candidate's minor version is
 * greater-than or equal to \p minimum's.
 * @retval false If the major versions differ, or \p candidate's minor version is lower than
 * \p minimum's.
 */
inline bool isVersionSemanticallyCompatible(const char* name, const Version& minimum, const Version& candidate)
{
    // Different major versions are never compatible.
    if (minimum.major != candidate.major)
        return false;

    // Normal (non-zero major) semver: any equal-or-higher minor is compatible.
    if (minimum.major != 0)
        return minimum.minor <= candidate.minor;

    // Major of 0: equal majors but any minor difference makes versions potentially
    // incompatible per http://semver.org. We reject a lower candidate minor outright, and
    // merely warn (when a name is given) about a higher one.
    // see https://nvidia-omniverse.atlassian.net/browse/CC-249
    if (minimum.minor > candidate.minor)
        return false;

    if (name && minimum.minor < candidate.minor)
    {
        // using CARB_LOG maybe pointless, as logging may not be set up yet.
        fprintf(stderr,
                "Warning: Possible version incompatibility. Attempting to load %s with version v%" PRIu32 ".%" PRIu32
                " against v%" PRIu32 ".%" PRIu32 ".\n",
                name, candidate.major, candidate.minor, minimum.major, minimum.minor);
    }
    return true;
}

} // namespace carb
omniverse-code/kit/include/carb/BindingsPythonTypes.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief A collection of opaque type definitions needed by multiple Python bindings #pragma once #ifndef DOXYGEN_BUILD namespace carb { namespace input { struct Mouse { }; struct Keyboard { }; } // namespace input namespace windowing { struct Window { }; } // namespace windowing } // namespace carb #endif
omniverse-code/kit/include/carb/RStringEnum.inl
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// This file is purposely missing #pragma once or any sort of include guard as it is included multiple times.

//! @file
//!
//! @brief Registered String utility enum values. See carb::RString for more info.

#if !defined(RSTRINGENUM_FROM_RSTRING_H) && !defined(RSTRINGENUM_FROM_RSTRING_INL)
#    error This file may only be included from RString.h or RString.inl.
#endif

// This file is an X-macro list with two inclusion modes:
//  - If the includer has NOT defined ENTRY (or for IntelliSense/Doxygen), the block below declares
//    the eRString enum itself, defining ENTRY/EMPTY_ENTRY to expand to enumerators.
//  - If the includer HAS defined ENTRY/EMPTY_ENTRY (as RStringInternals.inl does), only the entry
//    list is emitted, so the same list can be expanded into other data (e.g. string tables).
#if (defined(__INTELLISENSE__) && defined(RSTRINGENUM_FROM_RSTRING_H)) || !defined(ENTRY) || defined(DOXYGEN_BUILD)
namespace carb
{

//! The maximum number of static RString values. Values over this amount are guaranteed to be dynamic.
constexpr size_t kMaxStaticRString = 500;

#    ifndef DOXYGEN_SHOULD_SKIP_THIS
#        define ENTRY(index, name) RS_##name = index,
#        define EMPTY_ENTRY(index, name) name = index,
#        define BUILDING_ENUM 1
#    endif

//! Enum values for pre-defined registered strings.
enum class eRString : unsigned
{
#else
#    define BUILDING_ENUM 0
#endif

    // clang-format off

    // For step 4 in the Increasing Version checklist in RStringInternals.inl, copy the block below into the saved-off
    // version of RStringInternals.inl
    // vvvvvvvvvv
    EMPTY_ENTRY(0, Empty) //!< Default static registered string for unassigned RString values. Specifically missing the
                          //!< RS_ prefix because the string does not match the enum name in case RS_Empty is added
                          //!< later.
    ENTRY(1, RString) //!< Static registered string describing the RString class.
    ENTRY(2, carb) //!< Static registered string describing the carb namespace.
    ENTRY(3, omni) //!< Static registered string describing the omni namespace.
    ENTRY(4, Carbonite) //!< Static registered string "Carbonite".
    ENTRY(5, Omniverse) //!< Static registered string "Omniverse".
    ENTRY(6, None) //!< Static registered string "None".
    ENTRY(7, null) //!< Static registered string "null".
    ENTRY(8, bool) //!< Static registered string "bool".
    ENTRY(9, uint8) //!< Static registered string "uint8".
    ENTRY(10, uint16) //!< Static registered string "uint16".
    ENTRY(11, uint32) //!< Static registered string "uint32".
    ENTRY(12, uint64) //!< Static registered string "uint64".
    ENTRY(13, int8) //!< Static registered string "int8".
    ENTRY(14, int16) //!< Static registered string "int16".
    ENTRY(15, int32) //!< Static registered string "int32".
    ENTRY(16, int64) //!< Static registered string "int64".
    ENTRY(17, float) //!< Static registered string "float".
    ENTRY(18, double) //!< Static registered string "double".
    ENTRY(19, string) //!< Static registered string "string".
    ENTRY(20, charptr) //!< Static registered string "charptr".
    ENTRY(21, dictionary) //!< Static registered string "dictionary".
    ENTRY(22, variant_pair) //!< Static registered string "variant_pair".
    ENTRY(23, variant_array) //!< Static registered string "variant_array".
    ENTRY(24, RStringU) //!< Static registered string "RStringU".
    ENTRY(25, RStringKey) //!< Static registered string "RStringKey".
    ENTRY(26, RStringUKey) //!< Static registered string "RStringUKey".
    ENTRY(27, variant_map) //!< Static registered string "variant_map".

    // ^^^^^^^^^^
    // For step 4 in the Increasing Version checklist in RStringInternals.inl, copy the block above into the saved-off
    // version of RStringInternals.inl

    // clang-format on

#if BUILDING_ENUM
    RS_Max //!< Must be the last value.
};
static_assert(unsigned(eRString::RS_Max) <= kMaxStaticRString, "Too many static RString values!");

#    undef ENTRY
#    undef EMPTY_ENTRY

} // namespace carb
#endif

#undef BUILDING_ENUM
omniverse-code/kit/include/carb/RStringInternals.inl
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// Internals of the registered-string (RString) system. The structures in this file are placed in
// process-wide shared memory and manipulated by every module (EXE/DLL/shared object) loaded into
// the process, so their layout is strictly versioned -- see struct Data below.

#include "extras/Library.h"
#include "extras/SharedMemory.h"
#include "cpp/Atomic.h"
#include "math/Util.h"
#include "memory/Util.h"
#include "extras/ScopeExit.h"
#include "thread/Util.h"

#if CARB_PLATFORM_WINDOWS
#    include "CarbWindows.h"
#else
#    include <sys/mman.h>
#    include <unistd.h>
#endif

#include <inttypes.h>

namespace carb
{
namespace detail
{
namespace rstring
{

// Make sure everything is packed to 8 bytes. This is important since different modules can link this code but all
// instances of the code have to work with the same virtual memory.
#pragma pack(push, 8)

CARB_IGNOREWARNING_MSC_WITH_PUSH(4200) // nonstandard extension used: zero-sized array in struct/union
// A single registered-string record. Records are chained into hash buckets via m_next and are
// never deleted once created.
struct Rec
{
    Rec* m_next; // next record in the same hash bucket
    uint32_t m_stringId; // index of this record within the chunk lists
    uint32_t m_stringLen : 31; // length of m_string, not counting the NUL terminator
    uint32_t m_authority : 1; // set if this record is the case-insensitive "authority" for its uncased hash
    size_t const m_uncasedHash; // case-insensitive hash, computed at registration
    size_t m_hash{ 0 }; // case-sensitive hash, lazily computed on first use (0 = not yet computed)
    char m_string[0]; // Actual size is m_stringLen + 1

    Rec(Rec* next, uint32_t stringId, uint32_t stringLen, bool authority, size_t uncasedHash, const char* s)
        : m_next(next), m_stringId(stringId), m_stringLen(stringLen), m_authority(authority), m_uncasedHash(uncasedHash)
    {
        memcpy(m_string, s, stringLen);
        m_string[stringLen] = '\0';
    }
};
CARB_IGNOREWARNING_MSC_POP

// Header prepended to each system-level allocation; allocations are chained into Data::allocList.
struct MemoryAlloc
{
    MemoryAlloc* m_next;
    size_t m_size;
};

// States for the inter-module spin lock stored in Data::lock.
enum LockState : uint8_t
{
    Unlocked,
    Locked,
    LockedMaybeWaiting,
};

// Sentinel meaning "no pre-assigned static enum slot" when registering a string.
constexpr eRString kNoHint = eRString(-1);

namespace versioned
{

// NOTE NOTE NOTE: These constants should only be used in the initializer data for struct Data, below. Since a
// different module could have constructed Data, we need to read the appropriate values out of the Data struct itself
// at runtime.

// version 1: initial release
// version 2: added memory add/remove handlers
// version 3: linked list of Internals structures for memory tracking
constexpr uint8_t Version = 3;
// Checklist for increasing Version:
// 1. In premake5.lua, add a `define_rstringversiontest(X)` call where X is your new version number.
// 2. Copy this file (RStringInternals.inl) to a new directory `X` under
//    *source/tests/plugins/carb/rstringversiontest* where `X` is your new version number.
// 3. In the new copy of RStringInternals.inl produced in step 2, change the `rstring` namespace to `rstring_X` where
//    `X` is your new version number.
// 4. In the new copy of RStringInternals.inl produced in step 2, remove the line that is:
//        #include "RStringEnum.inl"
//    and replace it with the EMPTY_ENTRY() and ENTRY() definitions currently present in RStringEnum.inl

// These values can ONLY be changed if the Version is changed
constexpr size_t kNumHashBuckets = 2 << 10; // ~2k
constexpr size_t kMaxEntries = 2 << 20; // ~2m
constexpr size_t kEntriesPerChunk = 16 << 10; // ~16k
constexpr size_t kNumChunks = kMaxEntries / kEntriesPerChunk; // 128
constexpr size_t kAllocSize = 64 << 10; // ~64k
static_assert(math::isPowerOf2(kNumHashBuckets), "Hash table bucket count must be power of 2");
static_assert(math::isPowerOf2(kAllocSize), "Alloc size must be power of 2");
static_assert(kMaxStaticRString < kEntriesPerChunk, "All static entries must fit within first chunk");

} // namespace versioned

using Bucket = Rec*;
using HashTable = Rec**;
using Chunk = Rec*;
using ChunkList = Chunk*;
using VisualizerType = ChunkList*;

class Internals;

// This is for *debugging only*. A Visual Studio visualizer exists in carb.natvis to allow looking up RString values
// via this variable. Note that each module (executable or shared library) will have its own instance of this
// variable, which is expected. This allows all modules that use RString to have a Visualizer variable present that
// Visual Studio can use for debugging purposes.
CARB_WEAKLINK VisualizerType volatile Visualizer;

// Callback signature used to notify interested parties when shared RString memory is added/removed.
using OnMemoryChange = void (*)(const void*, size_t, void*);
struct MemoryChangeNotifier
{
    OnMemoryChange callback;
    void* user;

    bool operator==(const MemoryChangeNotifier& rhs) const
    {
        return callback == rhs.callback && user == rhs.user;
    }
};

// This structure is mapped into memory and carefully versioned as each binary (DLL/EXE/shared object/etc) can open
// the memory mapping and manipulate the data. Therefore, everything that uses this structure must agree on the layout
// and size and changes to this must be done in a very careful manner. The byte-offset comments on each member are
// enforced by the static_asserts below -- never reorder or resize existing members; only append new ones.
struct Data
{
    uint8_t version{ versioned::Version }; // Byte offsets used: 0 - 1
    std::atomic<LockState> lock{ Unlocked }; // 1 - 2
    std::atomic_ushort initialized{ 0 }; // 2 - 4
    size_t const MaxEntries{ versioned::kMaxEntries }; // 8 - 16
    size_t const EntriesPerChunk{ versioned::kEntriesPerChunk }; // 16 - 24
    size_t const StaticEntries{ kMaxStaticRString }; // 24 - 32
    size_t AllocSize{ versioned::kAllocSize }; // 32 - 40
    size_t nextIndex{ 0 }; // 40 - 48
    MemoryAlloc* allocList{ nullptr }; // 48 - 56
    uint8_t* mem{ nullptr }; // 56 - 64
    uint8_t* memEnd{ nullptr }; // 64 - 72
    size_t const NumHashBuckets{ versioned::kNumHashBuckets }; // 72 - 80
    Bucket* hashTableBuckets{ nullptr }; // 80 - 88
    size_t const ChunkListSize{ versioned::kNumChunks }; // 88 - 96
    ChunkList* chunkLists{ nullptr }; // 96 - 104

    // Don't change this count; add a new member instead.
    char loadingModule[256]; // 104 - 360

    // Don't change this count; add a new member instead.
    MemoryChangeNotifier onMemoryChange[16]; // 360 - 616

    // Linked list of all Internals objects in various modules
    Internals* head; // 616 - 624
    Internals* tail; // 624 - 632

    // NOTE: Always add new members here!
};

// Size and member asserts
static_assert(sizeof(MemoryChangeNotifier) == 16, "sizeof(MemoryChangeNotifier) may not change");
static_assert(offsetof(Data, version) == 0, "Member location and size may not change");
static_assert(offsetof(Data, lock) == 1, "Member location and size may not change");
static_assert(offsetof(Data, initialized) == 2, "Member location and size may not change");
static_assert(offsetof(Data, MaxEntries) == 8, "Member location and size may not change");
static_assert(offsetof(Data, EntriesPerChunk) == 16, "Member location and size may not change");
static_assert(offsetof(Data, StaticEntries) == 24, "Member location and size may not change");
static_assert(offsetof(Data, AllocSize) == 32, "Member location and size may not change");
static_assert(offsetof(Data, nextIndex) == 40, "Member location and size may not change");
static_assert(offsetof(Data, allocList) == 48, "Member location and size may not change");
static_assert(offsetof(Data, mem) == 56, "Member location and size may not change");
static_assert(offsetof(Data, memEnd) == 64, "Member location and size may not change");
static_assert(offsetof(Data, NumHashBuckets) == 72, "Member location and size may not change");
static_assert(offsetof(Data, hashTableBuckets) == 80, "Member location and size may not change");
static_assert(offsetof(Data, ChunkListSize) == 88, "Member location and size may not change");
static_assert(offsetof(Data, chunkLists) == 96, "Member location and size may not change");
static_assert(offsetof(Data, loadingModule) == 104, "Member location and size may not change");
static_assert(offsetof(Data, onMemoryChange) == 360, "Member location and size may not change");
static_assert(offsetof(Data, head) == 616, "Member location and size may not change");
static_assert(offsetof(Data, tail) == 624, "Member location and size may not change");

// This may change if new members are added to the end
// NOTE(review): the concatenated assert message below is missing a space between "was" and
// "increased"; left as-is since changing it would alter a runtime/compile-time string.
static_assert(sizeof(Data) == 632,
              "Please update this value to reflect new members. Make sure that version was"
              "increased and old versions accounted for. Existing members and their size may not be changed.");

// Case-sensitive equality of two buffers of known equal length.
inline bool casedEqual(const char* str1, const char* str2, size_t len)
{
    return std::memcmp(str1, str2, len) == 0;
}

// Case-insensitive equality of two buffers of known equal length.
inline bool uncasedEqual(const char* str1, const char* str2, size_t len)
{
    const char* const str1end = str1 + len;
    while (str1 != str1end)
    {
        if (carb::tolower(*(str1++)) != carb::tolower(*(str2++)))
            return false;
    }
    return true;
}

// Case-sensitive three-way comparison (memcmp semantics); a shorter string that is a prefix of the
// longer compares as less.
inline int casedCompare(const char* str1, size_t len1, const char* str2, size_t len2)
{
    int result = std::memcmp(str1, str2, ::carb_min(len1, len2));
    if (result == 0 && len1 != len2)
        return len1 < len2 ? -1 : 1;
    return result;
}

// Case-insensitive three-way comparison; same prefix rule as casedCompare.
inline int uncasedCompare(const char* str1, size_t len1, const char* str2, size_t len2)
{
    size_t minlen = ::carb_min(len1, len2);
    for (; minlen != 0; --minlen, ++str1, ++str2)
    {
        signed char c1 = carb::tolower(*str1);
        signed char c2 = carb::tolower(*str2);
        int val = c1 - c2;
        if (val != 0)
            return val;
    }
    if (len1 != len2)
        return len1 < len2 ?
-1 : 1; return 0; } class Internals { public: static Internals& get() { static Internals internals{}; return internals; } const Rec* at(uint32_t stringId) const { CARB_UNUSED(m_unused); // Don't need to hold the mutex for this size_t chunkList = stringId / m_data->EntriesPerChunk; size_t chunkListOffset = stringId % m_data->EntriesPerChunk; if (chunkList < m_data->ChunkListSize && m_data->chunkLists[chunkList]) { return m_data->chunkLists[chunkList][chunkListOffset]; } return nullptr; } const Rec* operator[](uint32_t stringId) const { return at(stringId); } uint32_t findOrAdd(const char* str, bool uncased, RStringOp op) { if (!str || *str == '\0') return uint32_t(eRString::Empty); return findOrAdd(kNoHint, str, std::strlen(str), uncased, op); } uint32_t findOrAdd(const char* str, size_t len, bool uncased, RStringOp op) { if (!str || len == 0) return uint32_t(eRString::Empty); return findOrAdd(kNoHint, str, len, uncased, op); } uint32_t convertUncased(uint32_t stringId) const { const Rec* rec = at(stringId); if (rec) { // This is already the case-insensitive authority. if (rec->m_authority) { return stringId; } // The uncased record should always be found. rec = hashTableFind(rec->m_string, rec->m_stringLen, true, rec->m_uncasedHash); CARB_ASSERT(rec); return rec->m_stringId; } return uint32_t(eRString::Empty); } size_t getHash(uint32_t stringId) { Rec* rec = at(stringId); if (!rec) return 0; carb::cpp::atomic_ref<size_t> hashRef(rec->m_hash); size_t hash = hashRef.load(std::memory_order_acquire); if (CARB_LIKELY(hash)) return hash; // The hash for this registered string hasn't been computed yet. If multiple threads enter this function // simultaneously, they should all compute the same value, so it doesn't matter if it's written multiple times. 
hash = carb::hashString(rec->m_string); hashRef.store(hash, std::memory_order_release); return hash; } bool addMemoryNotifier(OnMemoryChange callback, void* user, bool callForCurrent) { if (m_data->version >= 2 && callback) { lockMutex(); CARB_SCOPE_EXIT { unlockMutex(); }; size_t const kCount = CARB_COUNTOF(m_data->onMemoryChange); size_t i = 0; for (; i != kCount; ++i) { if (m_data->onMemoryChange[i].callback == nullptr) { m_data->onMemoryChange[i] = { callback, user }; break; } } if (i == kCount) { // No empty slots return false; } if (callForCurrent) { callback(m_data, sizeof(*m_data), user); // Report all registered visualizer variables if (m_data->version >= 3) { for (Internals* p = m_data->head; p; p = p->m_next) { callback(p->m_visualizer, sizeof(VisualizerType), user); } } else { // Report only our visualizer variable since we don't have a list to walk callback(m_visualizer, sizeof(VisualizerType), user); } // Report all allocations in the list for (MemoryAlloc* alloc = m_data->allocList; alloc; alloc = alloc->m_next) { callback(alloc, alloc->m_size, user); } // ChunkLists after the first one aren't included in the allocList, so add those separately for (size_t chunk = 1; chunk != m_data->ChunkListSize; ++chunk) { if (!m_data->chunkLists[chunk]) break; callback(m_data->chunkLists[chunk], sizeof(Chunk) * m_data->EntriesPerChunk, user); } } } return false; } void removeMemoryNotifier(OnMemoryChange callback, void* user) { if (m_data->version >= 2 && callback) { lockMutex(); CARB_SCOPE_EXIT { unlockMutex(); }; auto const end = std::find(m_data->onMemoryChange, m_data->onMemoryChange + CARB_COUNTOF(m_data->onMemoryChange), MemoryChangeNotifier{}); // Remove matching entries and fill to the end with empty entries. std::fill(std::remove(m_data->onMemoryChange, end, MemoryChangeNotifier{ callback, user }), end, MemoryChangeNotifier{}); } } void notifyQuickShutdown() { // The process is about to call _exit(), so close our shared memory regions. 
m_data = nullptr; m_view.reset(); m_shm.close(true); // force unlink of the shared memory } bool initializedByMe() const { return m_initializedByMe; } private: static bool validate(Data* pData) { // Either a shared object with an older version of this code exists and created the RString mapping, or // an existing file was found on disk from a previous crash and we're reusing the pid. If it's the former, // we want to use it gracefully. If it's the latter, we're going to erase it and do the new method. bool valid; // If initialization is in progress, wait for it to finish. However, this could be completely garbage memory // so we're going to put a time limit on it. NOTE: This used to use cpp::atomic::wait_for(), but this does not // work properly since different binaries map the shared memory to different addresses, and for the underlying // futex implementation to work properly the address must be unique. Therefore, we just spin for a bit. auto timeout = std::chrono::steady_clock::now() + std::chrono::seconds(5); while ((valid = pData->initialized.load(std::memory_order_acquire)) == false && std::chrono::steady_clock::now() < timeout) std::this_thread::yield(); valid = valid && memory::testReadable(pData->mem); valid = valid && memory::testReadable(pData->hashTableBuckets); valid = valid && memory::testReadable(pData->chunkLists); if (valid) { // Walk the alloc list MemoryAlloc* alloc = pData->allocList; while (alloc) { valid = memory::testReadable(alloc); if (!valid) break; alloc = alloc->m_next; } } if (valid) { // Walk the memory change notifier list if (pData->version >= 2) { auto const end = pData->onMemoryChange + CARB_COUNTOF(pData->onMemoryChange); for (auto p = pData->onMemoryChange; valid && p != end && p->callback; ++p) { valid = memory::testReadable((const void*)p->callback); } } } if (valid) { // Walk the internals list if (pData->version >= 3) { Internals* p = pData->head; while (p) { valid = memory::testReadable(p); if (!valid) break; p = p->m_next; 
} } } return valid; } void init() { // We created the memory, so it's our responsibility to initialize it. m_initializedByMe = true; new (m_data) Data{}; // Set the constructing module file name { const volatile void* addr = &Visualizer; auto libName = extras::getLibraryFilename(const_cast<void*>(addr)); auto len = ::carb_min(CARB_COUNTOF(m_data->loadingModule) - 1, libName.size()); memcpy(m_data->loadingModule, libName.data(), len); m_data->loadingModule[len] = '\0'; } size_t const allocGranularity = m_shm.getSystemAllocationGranularity(); CARB_ASSERT(math::isPowerOf2(allocGranularity)); // Round AllocSize up to allocation granularity m_data->AllocSize = (m_data->AllocSize + allocGranularity - 1) & -ptrdiff_t(allocGranularity); // Allocate everything needed initially from one chunk. Reserve extra bytes that will be used as Rec memory // once rounded up to allocation granularity. size_t sizeNeeded = sizeof(MemoryAlloc) + (sizeof(Bucket) * versioned::kNumHashBuckets) + (sizeof(ChunkList) * versioned::kNumChunks) + (sizeof(Rec*) * versioned::kEntriesPerChunk) + sizeof(Rec) + 1; // Round up to allocation granularity. sizeNeeded = (sizeNeeded + allocGranularity - 1) & -ptrdiff_t(allocGranularity); void* mem = sysAlloc(sizeNeeded); CARB_FATAL_UNLESS(mem, "Failed to allocate system memory for RString space"); // Don't need to notifyMemory() here because it's impossible that anything has registered at this point. Any // other threads will be waiting on m_data->initialized. 
uint8_t* bytes = static_cast<uint8_t*>(mem); m_data->memEnd = bytes + sizeNeeded; // Set all of the members m_data->allocList = new (bytes) MemoryAlloc{ m_data->allocList, sizeNeeded }; bytes += sizeof(MemoryAlloc); m_data->hashTableBuckets = reinterpret_cast<Bucket*>(bytes); bytes += (sizeof(Bucket) * versioned::kNumHashBuckets); // Set our Visualizer for debugging Visualizer = m_data->chunkLists = reinterpret_cast<ChunkList*>(bytes); bytes += (sizeof(ChunkList) * versioned::kNumChunks); m_data->chunkLists[0] = reinterpret_cast<Chunk*>(bytes); bytes += (sizeof(Chunk) * versioned::kEntriesPerChunk); m_data->mem = bytes; CARB_ASSERT(size_t(m_data->memEnd - m_data->mem) > sizeof(Rec)); // Should at least be able to fit one Rec struct Entry { eRString enumVal; const char* str; size_t len; }; #define EMPTY_ENTRY(a, b) { eRString(a), "", 0 }, #define ENTRY(a, b) { eRString(a), #b, CARB_COUNTOF(#b) - 1 }, static const Entry entries[] = { #define RSTRINGENUM_FROM_RSTRING_INL #include "RStringEnum.inl" #undef RSTRINGENUM_FROM_RSTRING_INL #undef ENTRY #undef EMPTY_ENTRY }; // Register the static strings for (size_t i = 0; i != CARB_COUNTOF(entries); ++i) { findOrAdd(entries[i].enumVal, entries[i].str, entries[i].len, false, RStringOp::eRegister); } m_data->nextIndex = kMaxStaticRString + 1; // Set up the linked list of Internal structures m_data->head = m_data->tail = this; m_next = m_prev = nullptr; // redundant // Last step: set initialized. Anyone spinning on `initialized` will wake. // NOTE: Older versions of RString used cpp::atomic::wait and notify_all(), but this doesn't work properly // because each binary will map the shared memory at a different address and the underlying futex system // requires a unique address to work properly. 
auto old = m_data->initialized.exchange(1, std::memory_order_release); CARB_FATAL_UNLESS(old == 0, "Initialization of internal data already performed!"); } Internals() { // Static checks static_assert(offsetof(Internals, m_version) == 0, "Member size and offset may not change"); static_assert(sizeof(m_version) == 1, "Member size and offset may not change"); static_assert(offsetof(Internals, m_initializedByMe) == 1, "Member size and offset may not change"); static_assert(sizeof(m_initializedByMe) == 1, "Member size and offset may not change"); static_assert(offsetof(Internals, m_next) == 8, "Member size and offset may not change"); static_assert(sizeof(m_next) == 8, "Member size and offset may not change"); static_assert(offsetof(Internals, m_prev) == 16, "Member size and offset may not change"); static_assert(sizeof(m_prev) == 8, "Member size and offset may not change"); static_assert(offsetof(Internals, m_visualizer) == 24, "Member size and offset may not change"); static_assert(sizeof(m_visualizer) == 8, "Member size and offset may not change"); process::ProcessId pid = this_process::getId(); char name[256]; // PID does not a good ID make. Ideally we would use this_process::getUniqueId(), but we cannot change the name // because we need to be backwards compatible with modules built with an old version of this code. Windows does // a good job of cleaning up named objects once all references to them expire. Since this is only used within // the context of a process, that means that we're very unlikely to run into a situation on Windows where the // data is invalid because we're reusing a PID. On Linux however, that's not the case. Linux doesn't // automatically clean up shared memory objects. This is unfortunately as it means that the shared memory region // that we opened could have been garbage from a different process that has crashed or did not shut down // cleanly. 
As the mapped region contains pointers that are only good when the process is running, we need to // validate the data and unlink it if it isn't valid. For Linux, to ensure that we have exclusive access, we're // also going to hold the same shared semaphore that SharedMemory uses. extras::formatString(name, CARB_COUNTOF(name), "carb-RStringInternals-%" OMNI_PRIpid, pid); auto result = extras::SharedMemory::Result::eOpened; uint32_t shmFlags = 0; #if CARB_PLATFORM_LINUX // Keep the global semaphore locked the whole time this is occurring. This will allow us to check the old shm // region and create a new one under the lock. extras::detail::NamedSemaphore sema{ extras::detail::getGlobalSemaphoreName() }; std::unique_lock<extras::detail::NamedSemaphore> guard(sema); shmFlags = extras::SharedMemory::fNoMutexLock; if (m_shm.open(name, sizeof(Data), shmFlags | extras::SharedMemory::fQuiet)) { m_view.reset(m_shm.createView()); if (!m_view) { guard.unlock(); // Don't crash with the global semaphore locked! CARB_FATAL_UNLESS(false, "Error while mapping shared memory %s", name); } m_data = static_cast<Data*>(m_view->getAddress()); if (!validate(m_data)) { m_data = nullptr; m_view.reset(); m_shm.close(true); // Force unlink since nothing could successfully use this shm object // Should now be able to create a new one if (!m_shm.create(name, sizeof(Data), shmFlags)) { guard.unlock(); // Don't crash with the global semaphore locked! CARB_FATAL_UNLESS(false, "Failed to create shared memory named %s", name); } result = extras::SharedMemory::Result::eCreated; } } #endif if (!m_shm.isOpen()) { result = m_shm.createOrOpen(name, sizeof(Data), shmFlags); } #if CARB_PLATFORM_LINUX // The rest of initialization can proceed without the global lock. 
        guard.unlock();
#endif
        CARB_FATAL_UNLESS(result != extras::SharedMemory::eError, "Error while opening shared memory %s", name);
        m_view.reset(m_shm.createView());
        CARB_FATAL_UNLESS(m_view, "Error while mapping shared memory %s", name);
        m_data = static_cast<Data*>(m_view->getAddress());
        if (result == extras::SharedMemory::eCreated)
        {
            // We created the shm object, so it's our job to initialize
            init();
        }
        else
        {
            // Wait until initialized is non-zero. NOTE: Previously this used a cpp::atomic::wait(), but this will not
            // work properly because different binaries map the shared memory to different addresses, so `*this` of the
            // atomic variable varies and the underlying futex may not wake all waiters. Therefore, just spin and wait
            // for initialization.
            while (!m_data->initialized.load(std::memory_order_acquire))
                std::this_thread::yield();
            CARB_FATAL_UNLESS(
                m_data->StaticEntries >= kMaxStaticRString,
                "RString: version mismatch: this module expects static RString entries that the loading module (%s) is not aware of. Please re-build the loading module with the latest version of Carbonite.",
                m_data->loadingModule);
            lockMutex();
            // Set our Visualizer for debugging
            Visualizer = m_data->chunkLists;
            // Add ourself to the internal list if correct version. If we're working with an older version, next and
            // prev will remain nullptr.
            if (m_data->version >= 3)
            {
                m_prev = m_data->tail;
                if (m_prev)
                {
                    m_data->tail = m_prev->m_next = this;
                }
                else
                {
                    m_data->head = m_data->tail = this;
                }
            }
            // Notify about our visualizer
            notifyMemory(m_visualizer, sizeof(VisualizerType));
            unlockMutex();
        }
    }

    // Tears down this module's registration in the cross-module shared state. Runs at static destruction of the
    // module that owns this Internals instance.
    ~Internals()
    {
        // We should only get here if notifyQuickShutdown() was not called. This assert fires if it was.
        CARB_ASSERT(m_data);
        lockMutex();
        // Remove ourself from the linked list if we were registered into it
        if (m_data->version >= 3)
        {
            if (m_next)
                m_next->m_prev = m_prev;
            else
            {
                CARB_ASSERT(m_data->tail == this);
                m_data->tail = m_prev;
            }
            if (m_prev)
                m_prev->m_next = m_next;
            else
            {
                CARB_ASSERT(m_data->head == this);
                m_data->head = m_next;
            }
            m_next = m_prev = nullptr;
        }
        // Remove our Visualizer from memory registration
        notifyMemory(m_visualizer, 0);
        unlockMutex();
        // Close our reference to the mapping and shared memory, but leak any memory created with sysAlloc() since it
        // may still be in use by other modules within the process.
    }

    // Looks up the record for a registered string id, or returns nullptr if the id falls in a chunk that has never
    // been allocated. NOTE(review): an id inside an allocated chunk but beyond nextIndex appears to return whatever
    // is stored in the (zero-initialized) slot — presumably nullptr; confirm against sysAlloc()'s zeroed pages.
    Rec* at(uint32_t stringId)
    {
        // Don't need to hold the mutex for this
        size_t chunkList = stringId / m_data->EntriesPerChunk;
        size_t chunkListOffset = stringId % m_data->EntriesPerChunk;
        if (chunkList < m_data->ChunkListSize && m_data->chunkLists[chunkList])
        {
            return m_data->chunkLists[chunkList][chunkListOffset];
        }
        return nullptr;
    }

    // Allocates `size` bytes directly from the OS (bypassing any module-local heap).
    void* sysAlloc(size_t size)
    {
        // Allocate memory directly from the system. This is necessary as different modules can have different heaps and
        // we want memory that won't be affected when modules are unloaded.
#if CARB_PLATFORM_WINDOWS
        return ::VirtualAlloc(nullptr, size, CARBWIN_MEM_COMMIT | CARBWIN_MEM_RESERVE, CARBWIN_PAGE_READWRITE);
#else
        return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif
    }

    // Releases memory obtained from sysAlloc(). `size` is required on POSIX (munmap) and ignored on Windows.
    void sysFree(void* mem, size_t size)
    {
#if CARB_PLATFORM_WINDOWS
        CARB_UNUSED(size);
        ::VirtualFree(mem, 0, CARBWIN_MEM_RELEASE);
#else
        ::munmap(mem, size);
#endif
    }

    // Invokes every registered memory-change callback (version >= 2 shared data only). A `size` of 0 signals removal
    // of a previously-registered region.
    void notifyMemory(void* mem, size_t size)
    {
        // Assumes that the mutex is locked before calling
        if (m_data->version >= 2)
        {
            for (size_t i = 0; i != CARB_COUNTOF(m_data->onMemoryChange); ++i)
            {
                auto& onMemoryChange = m_data->onMemoryChange[i];
                if (!onMemoryChange.callback)
                    break;
                onMemoryChange.callback(mem, size, onMemoryChange.user);
            }
        }
    }

    // Acquires the cross-process/cross-module spin lock stored in the shared memory block.
    void lockMutex()
    {
        LockState state = Unlocked;
        if (CARB_UNLIKELY(!m_data->lock.compare_exchange_strong(
                state, Locked, std::memory_order_acquire, std::memory_order_relaxed)))
        {
            if (state == LockedMaybeWaiting)
            {
                // Failed to lock and need to wait
                // NOTE: This used to use cpp::atomic::wait(), but this does not work properly since each binary maps
                // the shared memory to a different address, and the underlying futex needs a unique address. Therefore,
                // we just spin with a backoff as a workaround. The `LockedMaybeWaiting` state is a remnant from that
                // era, but we maintain it for older versions of RString that may still expect it to be used.
                this_thread::spinWaitWithBackoff(
                    [&] { return m_data->lock.load(std::memory_order_acquire) != LockedMaybeWaiting; });
            }
            while (m_data->lock.exchange(LockedMaybeWaiting, std::memory_order_acquire) != Unlocked)
            {
                this_thread::spinWaitWithBackoff(
                    [&] { return m_data->lock.load(std::memory_order_acquire) != LockedMaybeWaiting; });
            }
        }
        // Now inside the lock.
    }

    // Releases the spin lock acquired by lockMutex().
    void unlockMutex()
    {
        // Unlock the mutex. Older versions of RString used cpp::atomic::notify_one(), but this doesn't work
        // properly for the reasons mentioned in lockMutex().
        m_data->lock.store(Unlocked, std::memory_order_release);
    }

    // Lock-free lookup of a string in the shared hash table. `uncased` requests the case-insensitive "authority"
    // entry; otherwise an exact (cased) match is required. `uncasedHash` must be the lowercase hash of `str`.
    Rec* hashTableFind(const char* str, size_t len, bool uncased, size_t const uncasedHash) const
    {
        // Load from the bucket head with an atomic op because this is not called under the lock and another thread
        // could be modifying the table (under the lock) in findOrAdd(), below. This operation synchronizes-with the
        // store in findOrAdd().
        Rec* rec = carb::cpp::atomic_ref<Rec*>(m_data->hashTableBuckets[uncasedHash & (m_data->NumHashBuckets - 1)])
                       .load(std::memory_order_acquire);
        for (; rec; rec = rec->m_next)
        {
            if (rec->m_uncasedHash == uncasedHash && len == rec->m_stringLen)
            {
                if ((!uncased && casedEqual(str, rec->m_string, len)) ||
                    (uncased && rec->m_authority && uncasedEqual(str, rec->m_string, len)))
                {
                    // Found in the hash table
                    return rec;
                }
            }
        }
        return nullptr;
    }

    // Finds the id for `str` or registers it. `enumVal` is either a static-string slot (<= kMaxStaticRString) or
    // kNoHint for dynamically registered strings. Returns eRString::Empty when `op` is eFindExisting and the string
    // is not yet registered. Entries are never removed once added.
    uint32_t findOrAdd(eRString enumVal, const char* str, size_t const len, bool uncased, RStringOp op)
    {
        CARB_ASSERT(str);
        CARB_ASSERT(unsigned(enumVal) <= kMaxStaticRString || enumVal == kNoHint);

        size_t const uncasedHash = carb::hashLowercaseBuffer(str, len);

        // Check the hash-table for an existing entry. We don't need to lock to do this because nothing is ever deleted.
        // Also this can happen during initialization, but only in the thread/module constructing Data. All other
        // threads will be blocked on initializing or constructing the static Internals.
        Rec* rec = hashTableFind(str, len, uncased, uncasedHash);
        if (rec)
        {
            return rec->m_stringId;
        }

        // Not found in hash table. Bail if we're only doing a find.
        if (op == RStringOp::eFindExisting)
        {
            return uint32_t(eRString::Empty);
        }

        // Now need the lock. Make sure to unlock when we leave scope.
        lockMutex();
        CARB_SCOPE_EXIT
        {
            unlockMutex();
        };

        // Search the hash table again as it could have been inserted by a different thread under the lock, but we don't
        // expect this to be the case. So we do a broader search to see if there's a case-insensitive "authority"
        // already. If there isn't then the new one that we're adding will become the authority.
        Rec* authority = nullptr;
        Rec*& pBucketHead = m_data->hashTableBuckets[uncasedHash & (m_data->NumHashBuckets - 1)];
        for (rec = pBucketHead; rec; rec = rec->m_next)
        {
            if (rec->m_uncasedHash == uncasedHash && len == rec->m_stringLen)
            {
                // We're now looking for a case-insensitive authority, so do an case-insensitive check.
                if (uncasedEqual(str, rec->m_string, len))
                {
                    if (rec->m_authority)
                    {
                        CARB_ASSERT(!authority); // Should only be one.
                        authority = rec;
                        if (uncased)
                        {
                            // Unlikely case, but the one we wanted was added by another thread since we didn't find it
                            // earlier when not under the lock.
                            return rec->m_stringId;
                        }
                    }
                    if (!uncased && casedEqual(str, rec->m_string, len))
                    {
                        // Unlikely case, but the exact match we wanted was added by another thread since we didn't find
                        // it earlier when not under the lock.
                        return rec->m_stringId;
                    }
                }
            }
        }

        Rec** ppRec;
        size_t index = size_t(enumVal);
        if (enumVal == kNoHint)
        {
            index = m_data->nextIndex++;
            // Make sure that we have the chunklist for this index
            size_t chunkList = index / m_data->EntriesPerChunk;
            CARB_FATAL_UNLESS(chunkList < m_data->ChunkListSize, "Too many registered strings!");
            if (!m_data->chunkLists[chunkList])
            {
                // Allocate a chunk list
                const static size_t kAllocSize = sizeof(Chunk) * m_data->EntriesPerChunk;
                m_data->chunkLists[chunkList] = static_cast<ChunkList>(sysAlloc(kAllocSize));
                CARB_FATAL_UNLESS(m_data->chunkLists[chunkList], "Failed to allocate ChunkList!");
                notifyMemory(m_data->chunkLists[chunkList], kAllocSize);
            }
            size_t chunkListOffset = index % m_data->EntriesPerChunk;
            ppRec = &m_data->chunkLists[chunkList][chunkListOffset];
        }
        else
        {
            // Static strings always fit within the first chunk (static_assert'd above).
            ppRec = &m_data->chunkLists[0][index];
        }

        // Figure out how much space we need for the string. Rec ends with a zero-length string so allocate extra space
        // for the string and NUL terminator and round up to Rec's alignment.
        size_t sizeNeeded = sizeof(Rec) + len + 1;
        // Round up to Rec alignment
        sizeNeeded = (sizeNeeded + (alignof(Rec) - 1)) & -ptrdiff_t(alignof(Rec));

        // Can we fit in the current memory block? If not, we need to allocate a new block. Unfortunately, this means
        // that the bit of memory remaining in the current memory block is unused.
        if (size_t(m_data->memEnd - m_data->mem) < sizeNeeded)
        {
            CARB_FATAL_UNLESS(sizeNeeded < (m_data->AllocSize - sizeof(MemoryAlloc)),
                              "Trying to register massive string of size %zu!", len);
            uint8_t* bytes = static_cast<uint8_t*>(sysAlloc(m_data->AllocSize));
            CARB_FATAL_UNLESS(bytes, "Memory allocation failed");
            notifyMemory(bytes, m_data->AllocSize);
            m_data->memEnd = bytes + m_data->AllocSize;
            m_data->allocList = new (bytes) MemoryAlloc{ m_data->allocList, m_data->AllocSize };
            bytes += sizeof(MemoryAlloc);
            m_data->mem = bytes;
        }

        // If we don't have an existing authority, we want this new Rec to be the uncased authority.
        *ppRec = new (m_data->mem) Rec(pBucketHead, uint32_t(index), uint32_t(len), !authority, uncasedHash, str);
        m_data->mem += sizeNeeded;
        CARB_ASSERT(m_data->mem <= m_data->memEnd);

        // Add to the hash table. Do this with an atomic op even though we're under lock because other threads can be
        // walking the hash table without the lock. This operation synchronizes-with findHashTable().
        carb::cpp::atomic_ref<Rec*>(pBucketHead).store(*ppRec, std::memory_order_release);

        return (*ppRec)->m_stringId;
    }

    // These members may not change and are tied to versioned::Version as they can be changed by other instances of
    // Internal in other modules.
    uint8_t m_version{ versioned::Version }; // 0-1
    bool m_initializedByMe{ false }; // 1-2
    uint8_t m_unused[6]{}; // 2-8
    Internals* m_next{ nullptr }; // 8-16
    Internals* m_prev{ nullptr }; // 16-24
    VisualizerType* m_visualizer{ const_cast<VisualizerType*>(&Visualizer) }; // 24-32
    // New versioned members must be added here.

    // These members can vary
    carb::extras::SharedMemory m_shm;
    std::unique_ptr<carb::extras::SharedMemory::View> m_view;
    Data* m_data;
};
#pragma pack(pop)

} // namespace rstring
} // namespace detail
} // namespace carb
omniverse-code/kit/include/carb/FindPlugins.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! \file
//! \brief Utilities for finding plugins.
#pragma once

#include "Framework.h"
#include "extras/Library.h"
#include "filesystem/IFileSystem.h"
#include "filesystem/FindFiles.h"
#include "logging/Log.h"
#include "../omni/str/Wildcard.h"

#include <cstring>

namespace carb
{

//! Callback that is called when a candidate plugin file is located.
//! @param canonical The canonical name of the file.
//! @param reloadable Indicates that the filename matches a pattern in \ref FindPluginsArgs::reloadableFileWildcards.
//! @param context The `onMatchedContext` from \ref FindPluginsArgs.
using FindPluginsOnMatchedFn = void(const char* canonical, bool reloadable, void* context);

//! Arguments that are passed to \ref findPlugins().
//!
//! NOTE(review): plain aggregate with no member defaults — callers should value-initialize
//! (e.g. `FindPluginsArgs args{};`) before filling in fields so unused pointers are null.
struct FindPluginsArgs
{
    //! Search folders to look for plugins in.
    //!
    //! This may contain relative or absolute paths. All relative paths will be resolved relative to
    //! \ref carb::filesystem::IFileSystem::getAppDirectoryPath(), not the current working directory. Absolute paths
    //! in the list will be searched directly.
    //!
    //! If search paths configuration is invalid (e.g. search paths count is zero), the fall-back values are taken from
    //! the default plugin desc.
    //!
    //! May be nullptr.
    const char* const* searchPaths;
    size_t searchPathCount; //!< Number of entries in `searchPaths`. May be 0.

    //! Search the given paths recursively if `true`.
    bool searchRecursive;

    //! Filename wildcards to select loaded files. `*` and `?` can be used, e.g. "carb.*.pl?gin"
    //!
    //! If nullptr, a reasonable default is used.
    const char* const* loadedFileWildcards;
    size_t loadedFileWildcardCount; //!< Number of entries in `loadedFileWildcards`. May be 0.

    //! Filename wildcards to mark loaded files as reloadable. Framework will treat them specially to allow
    //! overwriting source plugins and will monitor them for changes.
    //!
    //! May be nullptr.
    const char* const* reloadableFileWildcards;
    size_t reloadableFileWildcardCount; //!< Number of entries in `reloadableFileWildcards`. May be 0.

    //! Filename wildcards to select excluded files. `*` and `?` can be used.
    //!
    //! May be nullptr.
    const char* const* excludedFileWildcards;
    size_t excludedFileWildcardCount; //!< Number of entries in `excludedFileWildcards`. May be 0.

    //! Callback when a file is matched but not excluded.
    //!
    //! @warning Must not be nullptr.
    FindPluginsOnMatchedFn* onMatched;
    void* onMatchedContext; //!< Context for onMatched. May be nullptr.

    //! Callback when a file is matched and excluded.
    //!
    //! May be nullptr.
    filesystem::FindFilesOnExcludedFn* onExcluded;
    void* onExcludedContext; //!< Context for onExcluded. May be nullptr.

    //! Callback when a file does not match one of the "loadedFileWildcard" patterns.
    //!
    //! May be nullptr.
    filesystem::FindFilesOnSkippedFn* onSkipped;
    void* onSkippedContext; //!< Context for onSkipped. May be nullptr.

    //! Callback invoked before searching one of the given directories.
    //!
    //! May be nullptr.
    filesystem::FindFilesOnSearchPathFn* onSearchPath;
    void* onSearchPathContext; //!< Context for onSearchPath. May be nullptr.

    //! IFileSystem object to use to walk the file system.
    //!
    //! If nullptr, tryAcquireInterface<IFileSystem> is called.
filesystem::IFileSystem* fs; }; #ifndef DOXYGEN_BUILD namespace detail { inline bool caseInsensitiveEndsWith(const char* str, const char* tail) { const size_t strLen = std::strlen(str); const size_t tailLen = std::strlen(tail); // String should be at least as long as tail if (strLen < tailLen) { return false; } // Compare with tail, character by character for (size_t i = 0; i < tailLen; ++i) { // Tail is assumed to already be lowercase if (tail[tailLen - i - 1] != std::tolower(str[strLen - i - 1])) { return false; } } return true; } } // namespace detail #endif //! Helper function to find plugins in a given list of search paths. //! //! See \ref FindPluginsArgs for argument documentation. //! //! When finding plugins, the following assumptions are made: //! //! * The file's extension is ignored. //! * On Linux, the "lib" prefix is ignored. //! * Tokens such as ${MY_ENV_VAR} in a search path is replaced with the corresponding env var. //! //! \param inArgs The arguments to use. //! \returns `true` if the filesystem was searched; `false` otherwise (i.e. bad args). 
inline bool findPlugins(const FindPluginsArgs& inArgs) noexcept
{
    // Translate FindPluginsArgs into the lower-level filesystem::FindFilesArgs and delegate the walk.
    filesystem::FindFilesArgs args{};

    args.searchPaths = inArgs.searchPaths;
    args.searchPathsCount = uint32_t(inArgs.searchPathCount);
    PluginLoadingDesc defaultPluginDesc = PluginLoadingDesc::getDefault();
    if (!args.searchPaths || (0 == args.searchPathsCount))
    {
        // If search path count is not specified, fall back to the default desc search paths
        args.searchPaths = defaultPluginDesc.searchPaths;
        args.searchPathsCount = uint32_t(defaultPluginDesc.searchPathCount);
    }

    args.matchWildcards = inArgs.loadedFileWildcards;
    args.matchWildcardsCount = uint32_t(inArgs.loadedFileWildcardCount);
    args.excludeWildcards = inArgs.excludedFileWildcards;
    args.excludeWildcardsCount = uint32_t(inArgs.excludedFileWildcardCount);

    // On POSIX platforms the conventional "lib" shared-library prefix is ignored when matching stems.
#if CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS
    constexpr const char* const kIgnorePrefixes[] = { "lib" };
    constexpr uint32_t kIgnorePrefixesCount = 1;
#elif CARB_PLATFORM_WINDOWS
    constexpr const char* const* kIgnorePrefixes = nullptr;
    constexpr uint32_t kIgnorePrefixesCount = 0;
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
    args.ignorePrefixes = kIgnorePrefixes;
    args.ignorePrefixesCount = kIgnorePrefixesCount;

    args.fs = inArgs.fs;

    // to avoid the expensive filename canonicalization and pattern matching, we do a quick check to make sure the
    // extension is for a plugin
    args.onFilterNonCanonical = [](const char* path, void*) {
        if (detail::caseInsensitiveEndsWith(path, carb::extras::getDefaultLibraryExtension()))
        {
            return filesystem::WalkAction::eContinue; // could be a plugin (i.e. correct .ext)
        }
        else
        {
            return filesystem::WalkAction::eSkip; // not a plug .ext. skip
        }
    };

    // Wrap the caller's onMatched callback: determine whether the file counts as "reloadable" by matching
    // its stem (and, on POSIX, the stem without the "lib" prefix) against the reloadable wildcards.
    args.onMatched = [](const char* canonical, void* context) {
        auto inArgs = static_cast<FindPluginsArgs*>(context);

        bool reloadable = false;
        if (inArgs->reloadableFileWildcards && inArgs->reloadableFileWildcardCount)
        {
            extras::Path path(canonical);
            auto stemBuffer = path.getStem();
            const char* stem = stemBuffer.getStringBuffer();
            reloadable = omni::str::matchWildcards(
                stem, inArgs->reloadableFileWildcards, uint32_t(inArgs->reloadableFileWildcardCount));
#if CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS
            if (!reloadable)
            {
                if (extras::startsWith(stem, "lib"))
                {
                    stem += 3;
                    reloadable = omni::str::matchWildcards(
                        stem, inArgs->reloadableFileWildcards, uint32_t(inArgs->reloadableFileWildcardCount));
                }
            }
#endif
        }

        inArgs->onMatched(canonical, reloadable, inArgs->onMatchedContext);
    };
    // const_cast is needed to pass inArgs through the void* context; the lambdas above only read from it.
    args.onMatchedContext = const_cast<FindPluginsArgs*>(&inArgs);

    args.onExcluded = inArgs.onExcluded;
    args.onExcludedContext = inArgs.onExcludedContext;
    if (!args.onExcluded)
    {
        // Default: just log excluded candidates.
        args.onExcluded = [](const char* canonical, void*) {
            CARB_LOG_VERBOSE("Excluding potential plugin file: %s.", canonical);
        };
    }

    args.onSkipped = inArgs.onSkipped;
    args.onSkippedContext = inArgs.onSkippedContext;

    args.onSearchPath = inArgs.onSearchPath;
    args.onSearchPathContext = inArgs.onSearchPathContext;
    if (!args.onSearchPath)
    {
        // Default: log each directory as it is searched.
        args.onSearchPath = [](const char* path, void* context) {
            auto inArgs = static_cast<FindPluginsArgs*>(context);
            CARB_LOG_VERBOSE(
                "Searching plugins %sin folder: %s", (inArgs->searchRecursive ? "recursively " : ""), path);
        };
        args.onSearchPathContext = const_cast<FindPluginsArgs*>(&inArgs);
    }

    args.flags = (filesystem::kFindFilesFlagMatchStem | filesystem::kFindFilesFlagReplaceEnvironmentVariables);
    if (inArgs.searchRecursive)
    {
        args.flags |= filesystem::kFindFilesFlagRecursive;
    }

    return filesystem::findFiles(args);
}

} // namespace carb
omniverse-code/kit/include/carb/Format.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <cstring>
#include <sstream>

namespace carb
{
namespace fmt
{
namespace detail
{

// Base case: no arguments remain. Whatever is left of the format string is emitted
// verbatim (including any leftover "{}" placeholders).
inline void format(std::ostringstream& stream, const char* str)
{
    stream << str;
}

// Recursive case: emit the text up to the next "{}", stream the first argument in its
// place, then recurse on the remainder with the rest of the arguments. If no placeholder
// is left, the tail is emitted as-is and surplus arguments are silently dropped.
template <class Arg, class... Args>
inline void format(std::ostringstream& stream, const char* str, Arg&& arg, Args&&... args)
{
    const char* placeholder = strstr(str, "{}");
    if (!placeholder)
    {
        stream << str;
        return;
    }
    stream.write(str, placeholder - str);
    stream << arg;
    format(stream, placeholder + 2, std::forward<Args>(args)...);
}

} // namespace detail

/**
 * Formats a string similar to the {fmt} library (https://fmt.dev), but header-only and without requiring an external
 * library be included
 *
 * NOTE: This is not intended to be a full replacement for {fmt}. Only '{}' is supported (i.e. no non-positional
 * support). And any type can be formatted, but must be streamable (i.e. have an appropriate operator<<)
 *
 * Example: format("{}, {} and {}: {}", "Peter", "Paul", "Mary", 42) would produce the string "Peter, Paul and Mary: 42"
 * @param str The format string. Use '{}' to indicate where the next parameter would be inserted.
 * @returns The formatted string
 */
template <class... Args>
inline std::string format(const char* str, Args&&... args)
{
    std::ostringstream out;
    detail::format(out, str, std::forward<Args>(args)...);
    return out.str();
}

} // namespace fmt
} // namespace carb
omniverse-code/kit/include/carb/Interface.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Macros for defining a Carbonite interface.
#pragma once

#include "Version.h"

#include <type_traits>

namespace carb
{

//! Defines a descriptor for the plugin interface.
//!
//! In order to get this information from an interface, call the interface's `getInterfaceDesc` function.
//!
//! This struct is asserted interop-safe below, so its layout must remain stable across modules.
struct InterfaceDesc
{
    const char* name = nullptr; //!< Name of the interface.
    Version version = { 0, 0 }; //!< Version of the interface.
};

CARB_ASSERT_INTEROP_SAFE(InterfaceDesc);

} // namespace carb

//! Macro to declare a `struct` as a Carbonite interface.
//!
//! This macro must be used in a public section of the interface `struct`. It is recommended to have it be the first
//! public "member" of the struct.
//!
//! @param name The name of the interface.
//!
//! @param major The major <a href="https://semver.org/">Semantic Version</a> of the interface. It is recommended to
//! start at `1`.
//! @param minor The minor <a href="https://semver.org/">Semantic Version</a> of the interface. It is recommended to
//! start at `0`.
//!
//! For plugins that support multiple interface versions through @ref CARB_PLUGIN_IMPL_EX, the @p major and @p minor
//! version represent the highest version available. This version will also be requested immediately after plugin
//! registration and **must** succeed.
//!
//! When using, a trailing semicolon is optional.
//!
//! @note A @p major of `0` has special significance to <a href="https://semver.org/">Semantic Versioning</a>: every
//! iteration of @p minor is also considered a breaking change. However, @ref carb::isVersionSemanticallyCompatible will
//! warn on different \a minor versions if the \a major version is `0`, but still report `true`.
//!
//! See @carb_framework_overview and @carb_interfaces for more information on creating Carbonite plugins.
//! @code{.cpp}
//! // Effective implementation
//! #define CARB_PLUGIN_INTERFACE(name, major, minor)
//!     static constexpr carb::InterfaceDesc getInterfaceDesc()
//!     {
//!         return carb::InterfaceDesc{ name, { major, minor } };
//!     }
//! @endcode
#define CARB_PLUGIN_INTERFACE(name, major, minor)                                                                      \
    /**                                                                                                                \
     * Returns information about this interface. Auto-generated by @ref CARB_PLUGIN_INTERFACE().                       \
     * @returns The @ref carb::InterfaceDesc struct with information about this interface.                             \
     */                                                                                                                \
    static constexpr carb::InterfaceDesc getInterfaceDesc()                                                            \
    {                                                                                                                  \
        return carb::InterfaceDesc{ name, { major, minor } };                                                          \
    }

// note that this needs to be included last to avoid a circular include dependency in
// 'carb/Defines.h'. A lot of source files and tests depend on 'carb/Interface.h'
// also pulling in 'carb/Defines.h'. Since nothing here strictly requires 'Defines.h',
// we'll just defer it's include until everything else useful in here has been defined.
#include "Defines.h"
omniverse-code/kit/include/carb/PluginCoreUtils.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Utilities to ease the creation of Carbonite plugins. Most code will include carb/PluginUtils.h //! instead of this file. #pragma once #include "Defines.h" #include "../omni/core/Api.h" // OMNI_API #include "../omni/core/Omni.h" #include <cstddef> #include <cstdint> //! See @ref carb::GetFrameworkVersionFn. Required by plugins. const char* const kCarbGetFrameworkVersionFnName = "carbGetFrameworkVersion"; //! See @ref carb::OnPluginRegisterFn. Required by plugins. const char* const kCarbOnPluginRegisterFnName = "carbOnPluginRegister"; //! See @ref carb::OnPluginRegisterExFn. Required by plugins. const char* const kCarbOnPluginRegisterExFnName = "carbOnPluginRegisterEx"; //! See @ref carb::OnPluginRegisterEx2Fn. Required by plugins. const char* const kCarbOnPluginRegisterEx2FnName = "carbOnPluginRegisterEx2"; //! See @ref carb::OnPluginPreStartupFn. Optional for plugins. const char* const kCarbOnPluginPreStartupFnName = "carbOnPluginPreStartup"; //! See @ref carb::OnPluginStartupFn. Optional for plugins. const char* const kCarbOnPluginStartupFnName = "carbOnPluginStartup"; //! See @ref carb::OnPluginStartupExFn. Optional for plugins. const char* const kCarbOnPluginStartupExFnName = "carbOnPluginStartupEx"; //! See @ref carb::OnPluginShutdownFn. Optional for plugins. const char* const kCarbOnPluginShutdownFnName = "carbOnPluginShutdown"; //! See @ref carb::OnPluginQuickShutdownFn. Optional for plugins. const char* const kCarbOnPluginQuickShutdownFnName = "carbOnPluginQuickShutdown"; //! 
See @ref carb::OnPluginPostShutdownFn. Optional for plugins. const char* const kCarbOnPluginPostShutdownFnName = "carbOnPluginPostShutdown"; //! See @ref carb::GetPluginDepsFn. Optional for plugins. const char* const kCarbGetPluginDepsFnName = "carbGetPluginDeps"; //! See @ref carb::OnReloadDependencyFn. Optional for plugins. const char* const kCarbOnReloadDependencyFnName = "carbOnReloadDependency"; namespace omni { namespace core { OMNI_DECLARE_INTERFACE(ITypeFactory) // forward declaration } namespace log { class ILog; // forward declaration } } // namespace omni /// @cond DEV /** * Helper macro to declare globals needed by Carbonite plugins. * * Do not directly use this macro. Rather use @ref CARB_PLUGIN_IMPL() which will call it for you. */ #define OMNI_MODULE_GLOBALS_FOR_PLUGIN() \ namespace \ { \ ::omni::core::ITypeFactory* s_omniTypeFactory = nullptr; \ ::omni::log::ILog* s_omniLog = nullptr; \ ::omni::structuredlog::IStructuredLog* s_omniStructuredLog = nullptr; \ } \ OMNI_MODULE_DEFINE_LOCATION_FUNCTIONS() \ OMNI_MODULE_GLOBALS_BUILD_CONFIG_SYMBOLS(); \ OMNI_API void* omniGetBuiltInWithoutAcquire(::OmniBuiltIn type) \ { \ switch (type) \ { \ case ::OmniBuiltIn::eITypeFactory: \ return s_omniTypeFactory; \ case ::OmniBuiltIn::eILog: \ return s_omniLog; \ case ::OmniBuiltIn::eIStructuredLog: \ return s_omniStructuredLog; \ default: \ return nullptr; \ } \ } /** * Populates the Omniverse interfaces portion of @ref carb::PluginFrameworkDesc. * * Do not directly use this macro. This macro is called by default by the @ref CARB_PLUGIN_IMPL_WITH_INIT() provided * version of @ref carb::OnPluginRegisterFn. 
*/ #define OMNI_MODULE_SET_GLOBALS_FOR_PLUGIN(in_) \ s_omniTypeFactory = (in_)->omniTypeFactory; \ s_omniLog = (in_)->omniLog; \ s_omniStructuredLog = (in_)->omniStructuredLog; #ifndef DOXYGEN_SHOULD_SKIP_THIS // FOR_EACH macro implementation, use as FOR_EACH(OTHER_MACRO, p0, p1, p2,) # define EXPAND(x) x # define FE_1(WHAT, X) EXPAND(WHAT(X)) # define FE_2(WHAT, X, ...) EXPAND(WHAT(X) FE_1(WHAT, __VA_ARGS__)) # define FE_3(WHAT, X, ...) EXPAND(WHAT(X) FE_2(WHAT, __VA_ARGS__)) # define FE_4(WHAT, X, ...) EXPAND(WHAT(X) FE_3(WHAT, __VA_ARGS__)) # define FE_5(WHAT, X, ...) EXPAND(WHAT(X) FE_4(WHAT, __VA_ARGS__)) # define FE_6(WHAT, X, ...) EXPAND(WHAT(X) FE_5(WHAT, __VA_ARGS__)) # define FE_7(WHAT, X, ...) EXPAND(WHAT(X) FE_6(WHAT, __VA_ARGS__)) # define FE_8(WHAT, X, ...) EXPAND(WHAT(X) FE_7(WHAT, __VA_ARGS__)) # define FE_9(WHAT, X, ...) EXPAND(WHAT(X) FE_8(WHAT, __VA_ARGS__)) # define FE_10(WHAT, X, ...) EXPAND(WHAT(X) FE_9(WHAT, __VA_ARGS__)) # define FE_11(WHAT, X, ...) EXPAND(WHAT(X) FE_10(WHAT, __VA_ARGS__)) # define FE_12(WHAT, X, ...) EXPAND(WHAT(X) FE_11(WHAT, __VA_ARGS__)) # define FE_13(WHAT, X, ...) EXPAND(WHAT(X) FE_12(WHAT, __VA_ARGS__)) # define FE_14(WHAT, X, ...) EXPAND(WHAT(X) FE_13(WHAT, __VA_ARGS__)) # define FE_15(WHAT, X, ...) EXPAND(WHAT(X) FE_14(WHAT, __VA_ARGS__)) # define FE_16(WHAT, X, ...) EXPAND(WHAT(X) FE_15(WHAT, __VA_ARGS__)) # define FE_17(WHAT, X, ...) EXPAND(WHAT(X) FE_16(WHAT, __VA_ARGS__)) # define FE_18(WHAT, X, ...) EXPAND(WHAT(X) FE_17(WHAT, __VA_ARGS__)) # define FE_19(WHAT, X, ...) EXPAND(WHAT(X) FE_18(WHAT, __VA_ARGS__)) # define FE_20(WHAT, X, ...) EXPAND(WHAT(X) FE_19(WHAT, __VA_ARGS__)) # define FE_21(WHAT, X, ...) EXPAND(WHAT(X) FE_20(WHAT, __VA_ARGS__)) # define FE_22(WHAT, X, ...) EXPAND(WHAT(X) FE_21(WHAT, __VA_ARGS__)) # define FE_23(WHAT, X, ...) EXPAND(WHAT(X) FE_22(WHAT, __VA_ARGS__)) # define FE_24(WHAT, X, ...) EXPAND(WHAT(X) FE_23(WHAT, __VA_ARGS__)) # define FE_25(WHAT, X, ...) 
EXPAND(WHAT(X) FE_24(WHAT, __VA_ARGS__)) # define FE_26(WHAT, X, ...) EXPAND(WHAT(X) FE_25(WHAT, __VA_ARGS__)) # define FE_27(WHAT, X, ...) EXPAND(WHAT(X) FE_26(WHAT, __VA_ARGS__)) # define FE_28(WHAT, X, ...) EXPAND(WHAT(X) FE_27(WHAT, __VA_ARGS__)) # define FE_29(WHAT, X, ...) EXPAND(WHAT(X) FE_28(WHAT, __VA_ARGS__)) # define FE_30(WHAT, X, ...) EXPAND(WHAT(X) FE_29(WHAT, __VA_ARGS__)) # define FE_31(WHAT, X, ...) EXPAND(WHAT(X) FE_30(WHAT, __VA_ARGS__)) # define FE_32(WHAT, X, ...) EXPAND(WHAT(X) FE_31(WHAT, __VA_ARGS__)) //... repeat as needed # define GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, \ _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, NAME, ...) \ NAME # define FOR_EACH(action, ...) \ EXPAND(GET_MACRO(__VA_ARGS__, FE_32, FE_31, FE_30, FE_29, FE_28, FE_27, FE_26, FE_25, FE_24, FE_23, FE_22, \ FE_21, FE_20, FE_19, FE_18, FE_17, FE_16, FE_15, FE_14, FE_13, FE_12, FE_11, FE_10, FE_9, \ FE_8, FE_7, FE_6, FE_5, FE_4, FE_3, FE_2, FE_1)(action, __VA_ARGS__)) # define DECLARE_FILL_FUNCTION(X) void fillInterface(X& iface); // carbOnPluginRegisterEx2() was added with carbonite version 0.5 without changing the carbonite version number. // Therefore, this exists only to support older carbonite version 0.5 instances that are not aware of // carbOnPluginRegisterEx2. This macro can be safely removed when Framework version 0.5 is no longer supported. static_assert(carb::kFrameworkVersion.major == 0, "Remove CARB_PLUGIN_IMPL_WITH_INIT_0_5"); # define CARB_PLUGIN_IMPL_WITH_INIT_0_5(impl, ...) \ FOR_EACH(DECLARE_FILL_FUNCTION, __VA_ARGS__) \ template <typename T1> \ void fillInterface0_5(carb::PluginRegistryEntry::Interface* interfaces) \ { \ interfaces[0].desc = T1::getInterfaceDesc(); \ static T1 s_pluginInterface{}; \ fillInterface(s_pluginInterface); \ interfaces[0].ptr = &s_pluginInterface; \ interfaces[0].size = sizeof(T1); \ } \ template <typename T1, typename T2, typename... 
Types> \ void fillInterface0_5(carb::PluginRegistryEntry::Interface* interfaces) \ { \ fillInterface0_5<T1>(interfaces); \ fillInterface0_5<T2, Types...>(interfaces + 1); \ } \ template <typename... Types> \ static void onPluginRegister0_5(carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry* outEntry) \ { \ static carb::PluginRegistryEntry::Interface s_interfaces[sizeof...(Types)] = {}; \ fillInterface0_5<Types...>(s_interfaces); \ outEntry->interfaces = s_interfaces; \ outEntry->interfaceCount = sizeof(s_interfaces) / sizeof(s_interfaces[0]); \ outEntry->implDesc = impl; \ \ g_carbFramework = frameworkDesc->framework; \ g_carbClientName = impl.name; \ OMNI_MODULE_SET_GLOBALS_FOR_PLUGIN(frameworkDesc) \ } \ CARB_EXPORT void carbOnPluginRegisterEx( \ carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry* outEntry) \ { \ onPluginRegister0_5<__VA_ARGS__>(frameworkDesc, outEntry); \ } # define CARB_PLUGIN_IMPL_WITH_INIT_0_5_EX(impl, ...) \ template <typename T1> \ void fillInterface0_5(carb::PluginRegistryEntry::Interface* interfaces) \ { \ interfaces->desc = T1::getInterfaceDesc(); \ static void* s_pluginInterface[(sizeof(T1) / sizeof(void*)) + 1] = {}; \ carb::Version ver = interfaces->desc.version; \ bool b = fillInterface<T1>(&ver, s_pluginInterface); \ CARB_FATAL_UNLESS(b, "Failed to construct interface for type %s", interfaces->desc.name); \ CARB_FATAL_UNLESS( \ ver == interfaces->desc.version, \ "Interface %s constructor requested version %u.%u but got %u.%u (must match exactly as the interface declared itself to be this version)", \ interfaces[0].desc.name, interfaces->desc.version.major, interfaces->desc.version.minor, ver.major, \ ver.minor); \ interfaces->ptr = &s_pluginInterface; \ interfaces->size = sizeof(T1); \ } \ template <typename T1, typename T2, typename... 
Types> \ void fillInterface0_5(carb::PluginRegistryEntry::Interface* interfaces) \ { \ fillInterface0_5<T1>(interfaces); \ fillInterface0_5<T2, Types...>(interfaces + 1); \ } \ template <typename... Types> \ static void onPluginRegister0_5(carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry* outEntry) \ { \ static carb::PluginRegistryEntry::Interface s_interfaces[sizeof...(Types)]; \ fillInterface0_5<Types...>(s_interfaces); \ outEntry->interfaces = s_interfaces; \ outEntry->interfaceCount = sizeof(s_interfaces) / sizeof(s_interfaces[0]); \ outEntry->implDesc = impl; \ \ g_carbFramework = frameworkDesc->framework; \ g_carbClientName = impl.name; \ OMNI_MODULE_SET_GLOBALS_FOR_PLUGIN(frameworkDesc) \ } \ CARB_EXPORT void carbOnPluginRegisterEx( \ carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry* outEntry) \ { \ onPluginRegister0_5<__VA_ARGS__>(frameworkDesc, outEntry); \ } #endif // DOXYGEN_SHOULD_SKIP_THIS /** * Defines boiler-plate code to declare the plugin's interfaces and registration code. * * Rather than directly calling this macro, consider calling @ref CARB_PLUGIN_IMPL which calls this macro for you. * * This macro does the following: * * - Defines `carbGetFrameworkVersion` and `carbOnPluginRegisterEx2` functions. * * - Sets the @ref g_carbFramework variable so @ref carb::getFramework() works. * * - Sets the plugin client variable: @ref g_carbClientName. The client name is used by @ref carb::Framework to * create a graph of client inter-dependencies. * * - Advertises to @ref carb::Framework the interfaces implemented by this plugin. * * - Enables the usage of ONI (see @oni_overview) in the plugin. * * This macro must be defined in the global namespace. * * @param impl The PluginImplDesc constant to be used as plugin description. * * @param ... One or more interface types to be implemented by the plugin. An interface is a `struct` with * a call to @ref CARB_PLUGIN_INTERFACE() inside it. 
These interface types are constructed during plugin registration
 * (prior to the plugin startup) and destructed immediately after plugin shutdown. A global fillInterface() function
 * must exist and will be called immediately after instantiating the interface type. The interface types need not be
 * trivially constructed or destructed, but their constructors and destructors MUST NOT use any Carbonite framework
 * functions.
 */
#define CARB_PLUGIN_IMPL_WITH_INIT(impl, ...) \
 \
    /* Forward declare fill functions for every interface */ \
    FOR_EACH(DECLARE_FILL_FUNCTION, __VA_ARGS__) \
 \
    /* Describes one interface entry: size/alignment plus placement-new construction and explicit destruction */ \
    template <typename T1> \
    void populate(carb::PluginRegistryEntry2::Interface2* iface) \
    { \
        iface->sizeofThisStruct = sizeof(carb::PluginRegistryEntry2::Interface2); \
        iface->desc = T1::getInterfaceDesc(); \
        iface->size = sizeof(T1); \
        iface->align = alignof(T1); \
        iface->Constructor = [](void* p) { fillInterface(*new (p) T1); }; \
        iface->Destructor = [](void* p) { static_cast<T1*>(p)->~T1(); }; \
    } \
 \
    /* Recursively populates one entry per interface type in the parameter pack */ \
    template <typename T1, typename T2, typename... Types> \
    void populate(carb::PluginRegistryEntry2::Interface2* interfaces) \
    { \
        populate<T1>(interfaces); \
        populate<T2, Types...>(interfaces + 1); \
    } \
 \
    template <typename... Types> \
    static void registerPlugin(carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry2* outEntry) \
    { \
        outEntry->sizeofThisStruct = sizeof(carb::PluginRegistryEntry2); \
        /* static: outEntry retains a pointer to this table after registration returns */ \
        static carb::PluginRegistryEntry2::Interface2 s_interfaces[sizeof...(Types)] = {}; \
        populate<Types...>(s_interfaces); \
        outEntry->interfaces = s_interfaces; \
        outEntry->interfaceCount = CARB_COUNTOF(s_interfaces); \
        outEntry->implDesc = impl; \
 \
        g_carbFramework = frameworkDesc->framework; \
        g_carbClientName = impl.name; \
        OMNI_MODULE_SET_GLOBALS_FOR_PLUGIN(frameworkDesc) \
    } \
 \
    CARB_EXPORT void carbOnPluginRegisterEx2( \
        carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry2* outEntry) \
    { \
        registerPlugin<__VA_ARGS__>(frameworkDesc, outEntry); \
    } \
 \
    CARB_EXPORT carb::Version carbGetFrameworkVersion() \
    { \
        return carb::kFrameworkVersion; \
    }

/* Variant of CARB_PLUGIN_IMPL_WITH_INIT for plugins whose interfaces are constructed through the versioned
 * fillInterface<T>(carb::Version*, void*) / destroyInterface<T>(carb::Version, void*) entry points (set below as
 * VersionedConstructor/VersionedDestructor) instead of the fillInterface(T&) form. */
#define CARB_PLUGIN_IMPL_WITH_INIT_EX(impl, ...) \
    /* forward declare fillInterface function */ \
    template <class T1> \
    bool fillInterface(carb::Version*, void*); \
    /* default destroyInterface is a no-op; plugins may explicitly specialize it for non-trivial types */ \
    template <class T1> \
    void destroyInterface(carb::Version, void*) \
    { \
    } \
 \
    template <typename T1> \
    void populate(carb::PluginRegistryEntry2::Interface2* iface) \
    { \
        iface->sizeofThisStruct = sizeof(carb::PluginRegistryEntry2::Interface2); \
        iface->desc = T1::getInterfaceDesc(); \
        iface->size = sizeof(T1); \
        iface->align = alignof(T1); \
        iface->Constructor = nullptr; \
        iface->Destructor = nullptr; \
        iface->VersionedConstructor = &fillInterface<T1>; \
        iface->VersionedDestructor = &destroyInterface<T1>; \
    } \
 \
    template <typename T1, typename T2, typename... Types> \
    void populate(carb::PluginRegistryEntry2::Interface2* interfaces) \
    { \
        populate<T1>(interfaces); \
        populate<T2, Types...>(interfaces + 1); \
    } \
 \
    template <typename... Types> \
    static void registerPlugin(carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry2* outEntry) \
    { \
        outEntry->sizeofThisStruct = sizeof(carb::PluginRegistryEntry2); \
        static carb::PluginRegistryEntry2::Interface2 s_interfaces[sizeof...(Types)] = {}; \
        populate<Types...>(s_interfaces); \
        outEntry->interfaces = s_interfaces; \
        outEntry->interfaceCount = CARB_COUNTOF(s_interfaces); \
        outEntry->implDesc = impl; \
 \
        g_carbFramework = frameworkDesc->framework; \
        g_carbClientName = impl.name; \
        OMNI_MODULE_SET_GLOBALS_FOR_PLUGIN(frameworkDesc) \
    } \
 \
    CARB_EXPORT void carbOnPluginRegisterEx2( \
        carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry2* outEntry) \
    { \
        registerPlugin<__VA_ARGS__>(frameworkDesc, outEntry); \
    } \
 \
    CARB_EXPORT carb::Version carbGetFrameworkVersion() \
    { \
        return carb::kFrameworkVersion; \
    }
/// @endcond

#if CARB_COMPILER_MSC
#    pragma section(".state", read, write)
#endif

/**
 * Macro to mark static and global variables to keep them when plugin is hot-reloaded.
 *
 * @rst

    .. deprecated:: 132.0

        Hot reloading support has been removed. No replacement will be provided. Note that any symbol this
        decorator is used on will generate a deprecation warning.

   @endrst
 */
#define CARB_STATE \
    CARB_DEPRECATED("hot reload has been removed") CARB_DECLSPEC(allocate(".state")) CARB_ATTRIBUTE(section(".state"))

// The below is for documentation only
#ifdef DOXYGEN_BUILD

/**
 * An automatically-generated function exported by Carbonite plugins used to check the plugin's Framework compatibility.
 *
 * \note This function is automatically generated for each plugin by the \ref CARB_PLUGIN_IMPL macro. It is called by
 * the Framework when registering a plugin. This serves as documentation of this function only.
 *
 * The type of this function is \ref carb::GetFrameworkVersionFn and named \ref kCarbGetFrameworkVersionFnName.
 *
 * @returns The Framework version that the plugin was built against.
The Framework uses this result to check if the * plugin is <a href="https://semver.org/">semantically compatible</a> with the Framework in order to continue loading. */ CARB_EXPORT carb::Version carbGetFrameworkVersion(); /** * An automatically-generated function exported by some Carbonite plugins (now deprecated). * * \note This function is automatically generated in some older plugins by the \ref CARB_PLUGIN_IMPL macro. It may be * called by the Framework when registering a plugin. This serves as documentation of this function only. * * \warning This function has been superseded by \ref carbOnPluginRegisterEx and \ref carbOnPluginRegisterEx2 in * Framework version 0.5. The Framework will look for and call the first available function from the following list: * \ref carbOnPluginRegisterEx2, \ref carbOnPluginRegisterEx, `carbOnPluginRegister` (this function). * * The type of this function is \ref carb::OnPluginRegisterFn and named \ref kCarbOnPluginRegisterFnName. * * Only plugins built with Framework versions prior to 0.5 export this function. * * @param framework The Framework will pass this function a pointer to itself when calling. * @param outEntry The plugin will populate this structure to inform the Framework about itself. */ CARB_EXPORT void carbOnPluginRegister(carb::Framework* framework, carb::PluginRegistryEntry* outEntry); /** * An automatically-generated function exported by some Carbonite plugins (now deprecated). * * \note This function is automatically generated in some older plugins by the \ref CARB_PLUGIN_IMPL macro. It may be * called by the Framework when registering a plugin. This serves as documentation of this function only. * * \warning This function has been superseded by \ref carbOnPluginRegisterEx2 in Framework version 0.5. The Framework * will look for and call the first available function from the following list: * \ref carbOnPluginRegisterEx2, `carbOnPluginRegisterEx` (this function), \ref carbOnPluginRegister. 
* * The type of this function is \ref carb::OnPluginRegisterExFn and named \ref kCarbOnPluginRegisterExFnName. * * This function is generated for all plugins built against Framework 0.5. Since \ref carbOnPluginRegisterEx2 was added * to Framework version 0.5 without changing the Framework version (in Carbonite release v111.17), this function exists * and is exported in all plugins compatible with Framework version 0.5 to allow the plugins to load properly in earlier * editions of Framework version 0.5. * * @param frameworkDesc A description of the Framework provided by the Framework when it calls this function. * @param outEntry The plugin will populate this structure to inform the Framework about itself. */ CARB_EXPORT void carbOnPluginRegisterEx(carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry* outEntry); /** * An automatically-generated function exported by some Carbonite plugins. * * \note This function is automatically generated in plugins by the \ref CARB_PLUGIN_IMPL macro. It is called by the * Framework when registering a plugin. This serves as documentation of this function only. * * \note Older versions of this function exist in older plugins. This is the most current registration function as of * Framework version 0.5 and intended to be "future proof" for future Framework versions. The Framework will look for * and call the first available function from the following list: * `carbOnPluginRegisterEx2` (this function), \ref carbOnPluginRegisterEx, \ref carbOnPluginRegister. * * The type of this function is \ref carb::OnPluginRegisterEx2Fn and named \ref kCarbOnPluginRegisterEx2FnName. * * This function is generated for all plugins built against Carbonite v111.17 and later. Prior Carbonite releases with * Framework version 0.5 will use the \ref carbOnPluginRegisterEx function, whereas Carbonite releases v111.17 and later * will use this function to register a plugin. 
* * @param frameworkDesc A description of the Framework provided by the Framework when it calls this function. * @param outEntry The plugin will populate this structure to inform the Framework about itself. */ CARB_EXPORT void carbOnPluginRegisterEx2(carb::PluginFrameworkDesc* frameworkDesc, carb::PluginRegistryEntry2* outEntry); /** * An automatically-generated function exported by Carbonite plugins. * * \note This function is automatically generated in plugins by the \ref CARB_DEFAULT_INITIALIZERS macro via the * \ref CARB_PLUGIN_IMPL macro. It is called by the Framework when starting the plugin (the first time an interface is * acquired from the plugin) prior to calling \ref carbOnPluginStartup. This serves as documentation of the generated * function only. * * This function starts up various Framework-provided subsystems for the plugin: logging, profiling, asserts, * localization and structured logging. The following functions are called in the plugin context: * - \ref carb::logging::registerLoggingForClient * - \ref carb::profiler::registerProfilerForClient * - \ref carb::assert::registerAssertForClient * - \ref carb::l10n::registerLocalizationForClient * - \ref omni::structuredlog::addModulesSchemas() * * The type of this function is \ref carb::OnPluginPreStartupFn and named \ref kCarbOnPluginPreStartupFnName. */ CARB_EXPORT void carbOnPluginPreStartup(); /** * An optional function that a plugin author can export from their plugin to start their plugin. * * The Framework will call this function after \ref carbOnPluginPreStartup when starting the plugin (the first time an * interface is acquired from the plugin). This serves as a guide for plugin authors. * * Providing this function is completely optional. * * Generally, if this function is provided, a \ref carbOnPluginShutdown should also be provided to cleanup any work * done by this function. * * This function is superseded by \ref carbOnPluginStartupEx, which allows startup to fail gracefully. 
If that function * does not exist, this function is called if it exists. * * Any interfaces declared as a dependency in \ref CARB_PLUGIN_IMPL_DEPS will be available to this plugin by the time * this function is called. * * This function is allowed to acquire interfaces and interact with the Framework normally (e.g. add hooks, etc.). * However, keep in mind that this function is called by the Framework when the Application or another plugin is trying * to acquire an interface from this plugin; actions that result in recursively starting the plugin will result in * failure to acquire the interface. However, your plugin is allowed to acquire other interfaces from itself in this * function. * * Once this function returns, the Framework considers your plugin as initialized. * * Typical things this function might do: * - Allocate memory and data structures for your plugin * - Load settings from \ref carb::settings::ISettings (if available) * - Start up libraries and subsystems * * The type of this function is \ref carb::OnPluginStartupFn and named \ref kCarbOnPluginStartupFnName. * * @note The thread context must be the same when this function returns as when the function is called (i.e. if called * within a fiber context, the same thread must return). However, if carb.tasking.plugin is used, this need not be the * case as Carbonite can handle that case properly. */ CARB_EXPORT void carbOnPluginStartup(); /** * An optional function that a plugin author can export from their plugin to start their plugin. * * The Framework will call this function after \ref carbOnPluginPreStartup when starting the plugin (the first time an * interface is acquired from the plugin). This serves as a guide for plugin authors. * * Providing this function is completely optional. * * Generally, if this function is provided, a \ref carbOnPluginShutdown should also be provided to cleanup any work * done by this function. * * This function supersedes \ref carbOnPluginStartup. 
The main difference is that this function allows the plugin to * indicate if startup fails (such as if a required subsystem fails to start) and allow the Framework to fail acquiring * an interface gracefully. If this function does not exist, \ref carbOnPluginStartup is called if it exists. * * Any interfaces declared as a dependency in \ref CARB_PLUGIN_IMPL_DEPS will be available to this plugin by the time * this function is called. * * This function is allowed to acquire interfaces and interact with the Framework normally (e.g. add hooks, etc.). * However, keep in mind that this function is called by the Framework when the Application or another plugin is trying * to acquire an interface from this plugin; actions that result in recursively starting the plugin will result in * failure to acquire the interface. However, your plugin is allowed to acquire other interfaces from itself in this * function. * * Once this function returns successfully, the Framework considers your plugin as initialized. If this function reports * failure, the plugin will be unloaded but remain registered. Attempting to acquire an interface from this plugin in * the future will reload the plugin and attempt to call this function again. * * Typical things this function might do: * - Allocate memory and data structures for your plugin * - Load settings from \ref carb::settings::ISettings (if available) * - Start up libraries and subsystems * * The type of this function is \ref carb::OnPluginStartupExFn and named \ref kCarbOnPluginStartupExFnName. * @returns `true` if the plugin started successfully; `false` otherwise. * * @note The thread context must be the same when this function returns as when the function is called (i.e. if called * within a fiber context, the same thread must return). However, if carb.tasking.plugin is used, this need not be the * case as Carbonite can handle that case properly. 
*/ CARB_EXPORT bool carbOnPluginStartupEx(); /** * An optional function that a plugin author can export from their plugin to shutdown their plugin. * * The Framework will call this function when directed to unload a plugin, immediately before calling * \ref carbOnPluginPostShutdown and before requesting that the OS release the plugin library. This function will also * be called if \ref carb::Framework::unloadAllPlugins is called, but *not* if * \ref carb::quickReleaseFrameworkAndTerminate is called. This serves as a guide for plugin authors. * * This function is mutually exclusive with \ref carbOnPluginQuickShutdown; either this function or that one is called * depending on the shutdown type. * * Providing this function is completely optional. * * Generally, this function should be provided if \ref carbOnPluginStartup or \ref carbOnPluginStartupEx is provided in * order to clean up the work done in the startup function. * * Any interfaces declared as a dependency in \ref CARB_PLUGIN_IMPL_DEPS will still be available to this plugin when * this function is called. * * \warning This function should not attempt to acquire any interfaces that the plugin has not previously acquired. * In other words, only use interfaces in this function that the plugin has already acquired. * * During shutdown, if a circular reference exists between interfaces acquired by this plugin and those interfaces * (possibly indirectly) acquiring interfaces from this plugin, then it is possible that interfaces acquired by this * plugin may already be shut down. Using \ref carb::getCachedInterface or \ref carb::Framework::tryAcquireInterface may * result in an error log being issued. In this case, \ref carb::Framework::tryAcquireExistingInterface will only * acquire the interface if the plugin providing it is still started. * * Once this function returns successfully, the Framework considers your plugin as shut down and will typically proceed * to unload the library. 
* * Typical things this function might do: * - Deallocate memory and data structures for your plugin * - Report on leaked objects * - Shut down libraries and subsystems * * The type of this function is \ref carb::OnPluginShutdownFn and named \ref kCarbOnPluginShutdownFnName. * * @note The thread context must be the same when this function returns as when the function is called (i.e. if called * within a fiber context, the same thread must return). This function *does not* allow context switches even when * used with carb.tasking.plugin. */ CARB_EXPORT void carbOnPluginShutdown(); /** * An optional function that a plugin author can export from their plugin to quick-shutdown their plugin. * * The Framework will call this function for each plugin only when \ref carb::quickReleaseFrameworkAndTerminate is * called, in the unload order determined by the Framework. This serves as a guide for plugin authors. * * This function is mutually exclusive with \ref carbOnPluginShutdown; either this function or that one is called * depending on the shutdown type. * * Providing this function is completely optional. * * Since \ref carb::quickReleaseFrameworkAndTerminate will terminate the process without running static destructors or * closing files/connections/etc., this function should be provided to do the bare minimum of work as quickly as * possible to ensure data is written out and stable. * * Any interfaces declared as a dependency in \ref CARB_PLUGIN_IMPL_DEPS will still be available to this plugin when * this function is called. * * While this function can acquire new interfaces (unlike \ref carbOnPluginShutdown), it is generally undesired to do * so as that can be time-consuming and antithetical to quick shutdown. * * Typical things this function might do: * - Close network connections * - Commit database transactions * - Flush and close files open for write * * The type of this function is \ref carb::OnPluginQuickShutdownFn and named \ref kCarbOnPluginQuickShutdownFnName. 
*/ CARB_EXPORT void carbOnPluginQuickShutdown(); /** * An automatically-generated function exported by Carbonite plugins. * * \note This function is automatically generated in plugins by the \ref CARB_DEFAULT_INITIALIZERS macro via the * \ref CARB_PLUGIN_IMPL macro. It is called by the Framework when shutting down the plugin immediately after calling * \ref carbOnPluginShutdown. This serves as documentation of the generated function only. * * This function shuts down various Framework-provided subsystems for the plugin: logging, profiling, asserts, and * localization. The following functions are called in the plugin context: * - \ref carb::assert::deregisterAssertForClient * - \ref carb::profiler::deregisterProfilerForClient * - \ref carb::logging::deregisterLoggingForClient * - \ref carb::l10n::deregisterLocalizationForClient * * The type of this function is \ref carb::OnPluginPostShutdownFn and named \ref kCarbOnPluginPostShutdownFnName. */ CARB_EXPORT void carbOnPluginPostShutdown(); /** * An automatically-generated function exported by some Carbonite plugins. * * \note This function is automatically generated in plugins by the \ref CARB_PLUGIN_IMPL_DEPS or * \ref CARB_PLUGIN_IMPL_NO_DEPS macros. It is called by the Framework when registering a plugin in order to determine * dependencies for the plugin. This serves as documentation of the generated function only. * * If neither of the above macros are used, this function is not generated for the plugin. The Framework considers this * function optional. * * The type of this function is \ref carb::GetPluginDepsFn and named \ref kCarbGetPluginDepsFnName. * * @param deps Assigned to static memory inside the plugin that is the array of interfaces the plugin is dependent on. * May be `nullptr` if there are no dependencies (in this case \p count must be `0`). * @param count Assigned to the number of items in the array of interfaces the plugin is dependent on. 
*/ CARB_EXPORT void carbGetPluginDeps(struct carb::InterfaceDesc** deps, size_t* count); /** * An optional function that a plugin author can export from their plugin to receive dependency reload notifications. * * When \ref carb::Framework::tryReloadPlugins is called, if a plugin is reloaded, any plugins which have acquired * interfaces from the reloading plugin will receive notifications before and after the plugin is reloaded via this * function. This serves as a guide for plugin authors. * * Providing this function is completely optional. * * Typical things this function might do (\p reloadState == `eBefore`): * - Release objects created from the interface * - Clear cached pointers to the interface * * Typical things this function might do (\p reloadState == `eAfter`): * - Update pointers to the new interface * - Reinstate objects * * The type of this function is \ref carb::OnReloadDependencyFn and named \ref kCarbOnReloadDependencyFnName. * * @param reloadState the callback phase * @param pluginInterface a pointer to the interface * @param desc a descriptor for the plugin */ CARB_EXPORT void carbOnReloadDependency(carb::PluginReloadState reloadState, void* pluginInterface, carb::PluginImplDesc desc); //! A dummy type representing a Carbonite Interface for documentation purposes. struct InterfaceType { CARB_PLUGIN_INTERFACE("carb::InterfaceType", 1, 0); }; /** * A required function that a plugin author must provide to construct an interface. * * @note This version of the function is required when using \ref CARB_PLUGIN_IMPL. When using \ref CARB_PLUGIN_IMPL_EX, * \ref fillInterface(carb::Version*, void*) is used instead. * * This function is called by the framework when the plugin is loaded, after calling \ref carbOnPluginRegisterEx2. The * plugin will fail to link if this function is not provided for all of the interfaces specified in the use of the * \ref CARB_PLUGIN_IMPL macro. 
* * @code{.cpp} * // Example * void fillInterface(carb::tasking::IFiberEvents& iface) * { * using namespace carb::fibereventtest; * iface = * { * notifyFiberStart, * notifyFiberStop, * }; * } * @endcode * * @param iface This must be a reference to an interface struct exported by your plugin. The members of this struct must * be set before returning. The underlying memory for `iface` is allocated and owned by the plugin and previously * constructed with placement new. This type will be destructed by calling the destructor explicitly immediately * before the plugin is unloaded. */ void fillInterface(InterfaceType& iface); /** * A required function that a plugin author must provide to construct a requested version of an interface. * * @note This version of the function is required when using \ref CARB_PLUGIN_IMPL_EX. When using \ref CARB_PLUGIN_IMPL, * \ref fillInterface(InterfaceType&) is used instead. * * This function can be called at any time after \ref carbOnPluginRegisterEx2 is called by the framework when the plugin * is loaded. Generally this is called immediately for all interfaces for the version specified in their * \ref CARB_PLUGIN_INTERFACE declaration, and at later points when a different interface version is requested. The * plugin will fail to link if this function is not provided for all of the interfaces specified in the use of the * \ref CARB_PLUGIN_IMPL_EX macro. * * @warning This function **must** succeed (return `true`) with \p v unchanged when called with \p v equal to the * version specified in \ref CARB_PLUGIN_INTERFACE, otherwise the plugin will fail to register or load. * * @warning If the type `T` that you're explicitly specializing is a complex type, make sure to provide a * \ref destroyInterface function to destruct the interface. 
* * @code{.cpp} * // Example * template <> * bool fillInterface<carb::stats::IStats>(carb::Version* v, void* iface) * { * using namespace carb::stats; * switch (v->major) * { * case IStats::getInterfaceDesc().version.major: * *v = IStats::getInterfaceDesc().version; * *static_cast<IStats*>(iface) = { addStat, removeStat, addValue, getValue, getCount }; * return true; * * default: * return false; * } * } * @endcode * * @tparam T The interface type handled by this function. The generic class template for any type `T` is declared but * not implemented by the \ref CARB_PLUGIN_IMPL_EX macro. This allows explicit specialization for the plugin author to * provide the function for their types. For instance, if you are providing the function for `carb::IObject`, your * function would be `template <> bool fillInterface<carb::IObject>(carb::Version* v, void* iface)`. * @param v When called, the framework provides the version requested. **The function must write the version constructed * into this parameter before returning.** The version constructed does not need to be the same as (or even * semantically compatible with) the version requested, but if a semantically compatible interface to the requested * version is available it should be provided. * @param buf A memory buffer that is guaranteed to be at least as large as `sizeof(T)`, allocated and owned by the * framework. For the initial allocation of an interface, this buffer is zeroed memory. If `T` is a POD type, the * memory can be cast to `T*` and filled in, or placement new can be used to construct your type. If `T` is a more * complex type, provide a \ref destroyInterface function to destruct the type. * @retval true to indicate that the type was constructed and \p v contains the version that was constructed, even if * the version is different from the value that was in \p v when the function was called. * @retval false to indicate that the requested interface version could not be constructed. 
*/ template <class T> bool fillInterface(carb::Version* v, void* buf); /** * An optional function that a plugin author can provide to destroy an interface. * * @note This function is only optionally required when using \ref CARB_PLUGIN_IMPL_EX. When using \ref CARB_PLUGIN_IMPL * the destruction of interfaces is handled through calling the type destructor explicitly. A plugin author need not * provide this function if `T` is a POD type. * * @warning This function is called immediately before the plugin is unloaded, and after \ref carbOnPluginShutdown. It * **must not use** any Carbonite Framework functions. This function is not called in the event of shutdown via * \ref carb::quickReleaseFrameworkAndTerminate. * * @tparam T The interface type handled by this function. The generic class template for any type `T` is declared and * implemented to do nothing by the \ref CARB_PLUGIN_IMPL_EX macro, making this function optional. A plugin author may * provide an explicit specialization in order to handle destruction of the interface type. For `carb::IObject` for * example, this would be: `template <> void destroyInterface<carb::IObject>(carb::Version v, void* buf)`. * @param v The exact version that was 'returned' from \ref fillInterface(carb::Version*, void*). * @param buf The memory buffer that was passed to \ref fillInterface(carb::Version*, void*). */ template <class T> void destroyInterface(Version v, void* buf); #endif
omniverse-code/kit/include/carb/Error.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! \file //! //! \brief Core components for error handling. //! These functions are the foundation for language-specific bindings and should not be used directly. See //! \c omni/Error.h for user-friendly C++ interfaces. #pragma once #include "Defines.h" #include "Interface.h" #include "Types.h" #include "Version.h" #include "cpp/StringView.h" #include "detail/DeferredLoad.h" #include "extras/Errors.h" #include "../omni/core/Result.h" #include "../omni/String.h" #include <cinttypes> namespace carb { //! @copydoc omni::core::Result using omni::core::Result; // bring all the kResult___ values into carb namespace //! Undocumented #define CARB_RESULT_USE_OMNI_RESULT_GEN(symbol_, ...) \ /** @copydoc omni::core::kResult##symbol_ \ */ \ using omni::core::kResult##symbol_; OMNI_RESULT_CODE_LIST(CARB_RESULT_USE_OMNI_RESULT_GEN) //! Opaque type which stores an error. Use \c ErrorApi::getErrorInfo to extract information from it. //! //! \warning //! Error pointers are intended to be owned by a single thread. They can be safely sent between threads, but it is not //! safe to access their contents from multiple threads simultaneously. class Error; //! The low-level API for interacting with the Carbonite error handling system. At the core, this system maintains a //! thread-specific error code and optional error message. //! //! The state of the error objects are maintained through `libcarb.so`/`carb.dll`, the API is accessed through //! \c carbGetErrorApi or \c ErrorApi::instance. Different versions of the API interact with the same global state. 
A //! call to \c ErrorApi::viewCurrentError with version 1.0 will still be able to read a \c ErrorApi::setError from a //! version 4.0 API (version 4.0 does not exist at this time). struct ErrorApi { CARB_PLUGIN_INTERFACE("carb::ErrorApi", 1, 0); //! Get the singleton instance of this API. //! //! The implementation of this is backed by `libcarb.so`/`carb.dll`, so it is a singleton instance for all loaded //! modules. This function is backed by \c carbGetErrorApi with the \c version argument provided with the value of //! the API version the calling module was built against. static ErrorApi const& instance() noexcept; //! Get a view of this thread's current error, if it is set. //! //! \warning //! The caller does not own the returned error. Any information pointed to by the returned value can be altered or //! cleared by a function calling any of the \c setError calls on this thread. Errors accessed are meant to be //! acted upon immediately. If you wish to preserve information, copy the pieces you wish to save or store the whole //! thing with \c errorClone or \c takeCurrentError. //! //! \param[out] code If not \c nullptr and there is a current error, then \c *code will be set to the error code of //! the current error. Use this to save an extra call to \c getErrorInfo when you are going to act on //! the code value. If there is no current error, then this will be set to \c kResultSuccess. //! //! \returns The current error, if it was set. If there is no error, this returns \c nullptr. Error const*(CARB_ABI* viewCurrentError)(Result* code); //! Get this thread's current error as an owned object, if it is set. After this call, this thread's current error //! will be cleared. //! //! It is the caller's responsibility to clean up the returned value, unless it is \c nullptr. //! //! This function is equivalent to cloning the current error, then resetting it: //! //! \code //! ErrorApi const& api = ErrorApi::instance(); //! 
Result code{}; // <- optimization only -- see viewCurrentError //! Error* my_err = api.errorClone(api.viewCurrentError(&code)); //! api.setErrorTo(nullptr); //! \endcode //! //! \param[out] code If not \c nullptr and there is a current error, then \c *code will be set to the error code of //! the current error. Use this to save an extra call to \c getErrorInfo when you are going to act on //! the code value. If there is no current error, then this will be set to \c kResultSuccess. //! \returns An owned version of the current error, if it was set. If there is no error, this returns \c nullptr. Error*(CARB_ABI* takeCurrentError)(Result* code); //! \private Result(CARB_ABI* internalSetError)(Result code, char const* message, std::size_t message_size); //! Set this thread's current error to the exact \a error or clear the current error if \a error is \c nullptr. //! Responsibility for this instance is taken by the error system, so you should not call \c errorRelease on it. //! //! \param error The error to set. If this is \c nullptr, this clears the current error. //! //! \retval kResultSuccess when the error is set without issue. //! \retval kResultInvalidOperation if `error == viewCurrentError()`. Restoring a previously-saved error should //! occur through a \c errorClone call, so doing this represents a bug in the code (note that you must have //! used \c const_cast or equivalent to get this to compile). In this case, no action is performed, as the //! current error is already set to the error. Result(CARB_ABI* setErrorTo)(Error* error); //! Set this thread's current error to a pre-allocated error indicating the system is out of memory. It will always //! succeed. void(CARB_ABI* setErrorOom)(); //! Release the \a error which was previously `errorClone`d or `errorCreate`d. //! //! \retval kResultSuccess when the error was released. //! \retval kResultInvalidOperation when `error == viewCurrentError()`. The current error does not need to be //! released. 
Result(CARB_ABI* errorRelease)(Error* error); //! Create a copy of the \a source error. //! //! \param source The error to clone from. If this is \c nullptr, \c nullptr will be returned without error. //! //! \returns The cloned error. In case of error, \c nullptr is returned and the current error is set with a message //! containing additional details. Error*(CARB_ABI* errorClone)(Error const* source); //! Create an error message with the \a code and pre-formatted \a message string without setting the current error. //! The parameters operate similarly to \c setError, but are more strict. Where \c setError would return a non-OK //! code but still set the current error, this function would return \c nullptr and set the current error to the //! failure. //! //! \param code The code for the created error, which callers can potentially act on. //! \param message The associated message containing additional details about the error. If this is \c nullptr, the //! default message for the \a code is used. //! \param message_size The number of bytes \a message is. //! //! \returns The created error on success. On failure, \c nullptr is returned and the current error is set with the //! reason for the failure. The code values are the same as \c setError. Error*(CARB_ABI* errorCreate)(Result code, const char* message, std::size_t message_size); //! Extract the information associated with the \a error message. The output parameters are views of the properties, //! so they are only valid as long as \a error is valid. //! //! \param error The source to extract data from. This can not be \c nullptr. //! \param[out] code If not \c nullptr, \c *code will be set to the code of this error. //! \param[out] message If not \c nullptr, \c *message will point to the error's detail message. //! \param[out] message_size If not \c nullptr, \c *message_size will be set to the size of the error's detail //! message. 
Note that \c *message is always null-terminated, but this is useful for optimization when //! copying. //! //! \retval kResultSuccess when the operation was successfully performed. //! \retval kResultInvalidArgument if \a error is \c nullptr. The current error is not set in this case (since the //! \a error can be \c current_error, we do not want to clear it for you. Result(CARB_ABI* getErrorInfo)(Error const* error, Result* code, char const** message, std::size_t* message_size); //! Get the name and default message for the given \a code. The \a name a symbol-like name in snake case like //! `"invalid_argument"`. The \a message is the default message for the \a code as a sentence fragment like //! `"invalid argument"`. //! //! Note that all of the output parameters are allowed to be \c nullptr. In this case, \c kResultSuccess is still //! returned. This can be useful for checking if a given \a code exists at all. //! //! \param code The error code to look up. //! \param[out] name A pointer to a place to put the name of the code. If this is \c nullptr, it will not be set. //! \param[out] name_size A pointer to place to put the size of \a name (in UTF-8 code units). Since \a name is //! null-terminated, this is not strictly needed, but can save you a \c strlen call. If this is //! \c nullptr, it will not be set. //! \param[out] message A pointer to the place to put the default message for this code. If this is \c nullptr, it //! will not be set. //! \param[out] message_size A pointer to the place to put the size of \a message (in UTF-8 code units). Since //! \a message is null-terminated, this is not strictly needed, but can save you a \c strlen call. If //! this is \c nullptr, it will not be set. //! //! \retval kResultSuccess when the operation was successfully performed. //! \retval kResultNotFound when \a code is not in the known list of error codes. The current error is not set. 
Result(CARB_ABI* getCodeDescription)( Result code, char const** name, std::size_t* name_size, char const** message, std::size_t* message_size); // // Inline helper functions // //! Set this thread's current error to a pre-formatted string. //! //! In the case of any error of a call to this function, the current error will always be set with \a code. However, //! the associated message of the error will be the default for that code. //! //! \param code The code for the created error, which callers can potentially act on. //! \retval kResultSuccess when the error is set without issue. //! \retval kResultInvalidArgument if the \a message is \c nullptr and \a message_size is not 0 or if \a message is //! not \c nullptr and the \a message_size is 0. The current error will still be set to \a code, but the //! message will be the default for that code. //! \retval kResultOutOfMemory if the message is too large to fit in the string buffer, but the call to allocate //! memory failed. The current error is still set to \a code, but the error message is saved as the default //! for that code. //! \retval kResultTooMuchData If the provided message is too large to fit in any error message buffer (the current //! maximum is 64 KiB). The current error will be set with a truncated version of the provided message. Result setError(Result code) const noexcept; //! \copydoc setError(Result) const noexcept //! \param message The associated message containing additional details about the error. If empty, the default //! default message for \p code is used. Result setError(Result code, const std::string& message) const noexcept; //! \copydoc setError(Result) const noexcept //! \param message The associated message containing additional details about the error. If empty, the default //! default message for \p code is used. Result setError(Result code, const omni::string& message) const noexcept; //! \copydoc setError(Result) const noexcept //! 
\param message The associated message containing additional details about the error. If empty, the default //! default message for \p code is used. Result setError(Result code, cpp::string_view message) const noexcept; //! \copydoc setError(Result) const noexcept //! \param message The associated message containing additional details about the error. If this is \c nullptr, the //! default message for the \a code is used. Result setError(Result code, const char* message) const noexcept; //! \copydoc setError(Result) const noexcept //! \param message The associated message containing additional details about the error. If this is \c nullptr, the //! default message for the \a code is used. //! \param message_size The number of bytes \a message is. If \a message is \c nullptr, this must be \c 0. Result setError(Result code, const char* message, std::size_t message_size) const noexcept; // // Static Inline helper functions // //! Clears any error for the current thread. //! //! This function is syntactic sugar around `setErrorTo(nullptr)`. //! \see setErrorTo static void clearError() noexcept; //! Get a view of this thread's current error. //! //! This function is syntactic sugar around \ref viewCurrentError(). If further information is desired, including //! the current thread's error message, use \ref getErrorInfo(). //! //! \see viewCurrentError() takeCurrentError() getErrorInfo() //! \returns If the calling thread has a current error, then the return value will be the result code of the current //! error. If the calling thread has no current error, the result will be \ref omni::core::kResultSuccess. static Result getError() noexcept; //! Sets the current thread's error code value based on the value of `errno`. //! //! The following table is a mapping of `errno` values to \ref omni::core::Result codes: //! @rst //! .. list-table:: Mapping of errno value to Result codes //! :widths: 50 50 //! :header-rows: 1 //! //! * - errno values //! 
- :cpp:type:`omni::core::Result` //! * - ``0`` //! - :cpp:member:`omni::core::kResultSuccess` //! * - ``ENOSYS`` //! - :cpp:member:`omni::core::kResultNotImplemented` //! * - ``EACCES`` //! - :cpp:member:`omni::core::kResultAccessDenied` //! * - ``ENOMEM`` //! - :cpp:member:`omni::core::kResultOutOfMemory` //! * - ``EINVAL`` //! - :cpp:member:`omni::core::kResultInvalidArgument` //! * - ``EAGAIN`` //! - :cpp:member:`omni::core::kResultTryAgain` //! * - ``EWOULDBLOCK`` //! - :cpp:member:`omni::core::kResultTryAgain` //! * - ``EINTR`` //! - :cpp:member:`omni::core::kResultInterrupted` //! * - ``EEXIST`` //! - :cpp:member:`omni::core::kResultAlreadyExists` //! * - ``EPERM`` //! - :cpp:member:`omni::core::kResultInvalidOperation` //! * - ``ENOENT`` //! - :cpp:member:`omni::core::kResultNotFound` //! * - Everything else //! - :cpp:member:`omni::core::kResultFail` //! @endrst //! @note The value of `errno` remains consistent across the call to this function. static void setFromErrno(); #if CARB_PLATFORM_WINDOWS || defined(DOXYGEN_BUILD) //! (Windows only) Sets the current thread's error code value based on the value of `GetLastError()`. //! //! The following table is a mapping of Windows error values to \ref omni::core::Result codes: //! @rst //! .. list-table:: Mapping of Windows error value to Result code //! :widths: 50 50 //! :header-rows: 1 //! //! * - errno values //! - :cpp:type:`omni::core::Result` //! * - ``ERROR_SUCCESS`` //! - :cpp:member:`omni::core::kResultSuccess` //! * - ``ERROR_PATH_NOT_FOUND`` //! - :cpp:member:`omni::core::kResultNotFound` //! * - ``ERROR_FILE_NOT_FOUND`` //! - :cpp:member:`omni::core::kResultNotFound` //! * - ``ERROR_ACCESS_DENIED`` //! - :cpp:member:`omni::core::kResultAccessDenied` //! * - ``ERROR_ALREADY_EXISTS`` //! - :cpp:member:`omni::core::kResultAlreadyExists` //! * - ``ERROR_FILE_EXISTS`` //! - :cpp:member:`omni::core::kResultAlreadyExists` //! * - ``ERROR_OUTOFMEMORY`` //! - :cpp:member:`omni::core::kResultOutOfMemory` //! 
* - ``ERROR_NO_MORE_FILES`` //! - :cpp:member:`omni::core::kResultNoMoreItems` //! * - ``ERROR_NO_MORE_ITEMS`` //! - :cpp:member:`omni::core::kResultNoMoreItems` //! * - ``ERROR_NOT_IMPLEMENTED`` //! - :cpp:member:`omni::core::kResultNotImplemented` //! * - ``ERROR_WAIT_TIMEOUT`` //! - :cpp:member:`omni::core::kResultTryAgain` //! * - ``ERROR_ERROR_TIMEOUT`` //! - :cpp:member:`omni::core::kResultTryAgain` //! * - Everything else //! - :cpp:member:`omni::core::kResultFail` //! @endrst //! @note The value of `errno` remains consistent across the call to this function. static void setFromWinApiErrorCode(); #endif }; } // namespace carb //! Get the instance of the error-handling API. //! //! \param version The requested version of the error-handling API to return; this value will be set to the maximum //! supported version. //! \returns On success, this returns a pointer to the error API which is compatible with the provided \a version. #if CARB_REQUIRE_LINKED CARB_DYNAMICLINK carb::ErrorApi const* carbGetErrorApi(carb::Version* version); #else CARB_DYNAMICLINK carb::ErrorApi const* carbGetErrorApi(carb::Version* version) CARB_ATTRIBUTE(weak); #endif namespace carb { namespace detail { //! \fn getCarbErrorApiFunc //! Loads the function which loads the \c ErrorApi. CARB_DETAIL_DEFINE_DEFERRED_LOAD(getCarbErrorApiFunc, carbGetErrorApi, (carb::ErrorApi const* (*)(carb::Version*))); } // namespace detail inline ErrorApi const& ErrorApi::instance() noexcept { static ErrorApi const* const papi = []() -> ErrorApi const* { const Version expected_version = ErrorApi::getInterfaceDesc().version; Version found_version = expected_version; auto p = detail::getCarbErrorApiFunc()(&found_version); CARB_FATAL_UNLESS(p != nullptr, "Failed to load Error API for version this module was compiled against. 
This module was " "compiled with Error API %" PRIu32 ".%" PRIu32 ", but the maximum-supported version of the " "API in the linked %s is %" PRIu32 ".%" PRIu32, expected_version.major, expected_version.minor, CARB_PLATFORM_WINDOWS ? "carb.dll" : "libcarb.so", found_version.major, found_version.minor); return p; }(); return *papi; } inline void ErrorApi::clearError() noexcept { auto r = instance().setErrorTo(nullptr); CARB_UNUSED(r); CARB_ASSERT(r == omni::core::kResultSuccess); } inline Result ErrorApi::getError() noexcept { Result r; instance().viewCurrentError(&r); return r; } inline Result ErrorApi::setError(Result code) const noexcept { return internalSetError(code, nullptr, 0); } inline Result ErrorApi::setError(Result code, const std::string& message) const noexcept { return internalSetError(code, message.c_str(), message.length()); } inline Result ErrorApi::setError(Result code, const omni::string& message) const noexcept { return internalSetError(code, message.c_str(), message.length()); } inline Result ErrorApi::setError(Result code, cpp::string_view message) const noexcept { return internalSetError(code, message.data(), message.length()); } inline Result ErrorApi::setError(Result code, const char* message) const noexcept { return internalSetError(code, message, message ? 
std::strlen(message) : 0); } inline Result ErrorApi::setError(Result code, const char* message, std::size_t message_size) const noexcept { CARB_ASSERT(message_size != size_t(-1)); return internalSetError(code, message, message_size); } inline void ErrorApi::setFromErrno() { auto e = errno; switch (e) { case 0: instance().setError(kResultSuccess); break; case ENOSYS: instance().setError(kResultNotImplemented); break; case EACCES: instance().setError(kResultAccessDenied); break; case ENOMEM: instance().setError(kResultOutOfMemory); break; case EINVAL: instance().setError(kResultInvalidArgument); break; case EAGAIN: #if !CARB_POSIX // This is different on Windows but the same for POSIX case EWOULDBLOCK: #endif instance().setError(kResultTryAgain); break; case EINTR: instance().setError(kResultInterrupted); break; case EEXIST: instance().setError(kResultAlreadyExists); break; case EPERM: instance().setError(kResultInvalidOperation); break; case ENOENT: instance().setError(kResultNotFound); break; default: instance().setError(kResultFail, extras::convertErrnoToMessage(e)); break; } errno = e; } #if CARB_PLATFORM_WINDOWS inline void ErrorApi::setFromWinApiErrorCode() { auto e = GetLastError(); switch (e) { case CARBWIN_ERROR_SUCCESS: instance().setError(kResultSuccess); break; case CARBWIN_ERROR_FILE_NOT_FOUND: case CARBWIN_ERROR_PATH_NOT_FOUND: instance().setError(kResultNotFound); break; case CARBWIN_ERROR_ACCESS_DENIED: instance().setError(kResultAccessDenied); break; case CARBWIN_ERROR_ALREADY_EXISTS: case CARBWIN_ERROR_FILE_EXISTS: instance().setError(kResultAlreadyExists); break; case CARBWIN_ERROR_OUTOFMEMORY: instance().setError(kResultOutOfMemory); break; case CARBWIN_ERROR_NO_MORE_FILES: case CARBWIN_ERROR_NO_MORE_ITEMS: instance().setError(kResultNoMoreItems); break; case CARBWIN_ERROR_CALL_NOT_IMPLEMENTED: instance().setError(kResultNotImplemented); break; case CARBWIN_WAIT_TIMEOUT: case CARBWIN_ERROR_TIMEOUT: instance().setError(kResultTryAgain); break; 
default: instance().setError(kResultFail, extras::convertWinApiErrorCodeToMessage(e)); break; } SetLastError(e); } #endif } // namespace carb
omniverse-code/kit/include/carb/BindingsUtils.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file //! //! @brief Utilities for script bindings #pragma once #include "ClientUtils.h" #include "Defines.h" #include "Format.h" #include "Framework.h" #include "InterfaceUtils.h" #include "ObjectUtils.h" #include "assert/AssertUtils.h" #include "logging/Log.h" #include "profiler/Profile.h" #include <functional> #include <sstream> #include <string> #include <unordered_map> namespace carb { /** * Wraps an interface function into a `std::function<>`. * * @tparam InterfaceType The Carbonite interface type (i.e. `logging::ILogging`); can be inferred. * @tparam ReturnType The return type of @p p; can be inferred. * @tparam Args Arguments of @p p; can be inferred. * @param p The interface function to wrap. * @returns A `std::function<ReturnType(InterfaceType&, Args...)>` wrapper around @p p. */ template <typename InterfaceType, typename ReturnType, typename... Args> auto wrapInterfaceFunction(ReturnType (*InterfaceType::*p)(Args...)) -> std::function<ReturnType(InterfaceType&, Args...)> { return [p](InterfaceType& c, Args... args) { return (c.*p)(args...); }; } /** * Wraps an interface function into a `std::function<>`. This version captures the interface so that it does not need to * be passed to every invocation. * * @tparam InterfaceType The Carbonite interface type (i.e. `logging::ILogging`); can be inferred. * @tparam ReturnType The return type of @p p; can be inferred. * @tparam Args Arguments of @p p; can be inferred. * @param c The Carbonite interface to capture as part of the wrapper function. 
* @param p The interface function to wrap. * @returns A `std::function<ReturnType(Args...)>` wrapper around @p p. */ template <typename InterfaceType, typename ReturnType, typename... Args> auto wrapInterfaceFunction(const InterfaceType* c, ReturnType (*InterfaceType::*p)(Args...)) -> std::function<ReturnType(Args...)> { return [c, p](Args... args) { return (c->*p)(args...); }; } /** * A helper function for \ref Framework::tryAcquireInterface() that attempts to load plugins if not found. * * @tparam InterfaceType The interface to acquire (i.e. `assets::IAssets`). Must be specified and cannot be inferred. * @param pluginName An optional specific plugin to acquire the interface from. If `nullptr`, the default plugin for the * given InterfaceType is used. * @returns A pointer to the interface. * @throws std::runtime_error if the interface cannot be acquired and exceptions are enabled, otherwise this error * condition results in a \ref CARB_FATAL_UNLESS() assertion. */ template <typename InterfaceType> InterfaceType* acquireInterfaceForBindings(const char* pluginName = nullptr) { carb::Framework* framework = carb::getFramework(); InterfaceType* iface = framework->tryAcquireInterface<InterfaceType>(pluginName); if (!iface) { // Try load plugins with default desc (all of them) carb::PluginLoadingDesc desc = carb::PluginLoadingDesc::getDefault(); framework->loadPlugins(desc); iface = framework->tryAcquireInterface<InterfaceType>(pluginName); if (!iface) { // somehow this header gets picked up by code compiled by -fno-exceptions #if !CARB_EXCEPTIONS_ENABLED OMNI_FATAL_UNLESS(iface, "Failed to acquire interface: '%s' (pluginName: '%s')", InterfaceType::getInterfaceDesc().name, pluginName ? pluginName : "nullptr"); #else throw std::runtime_error(fmt::format("Failed to acquire interface: {} (pluginName: {})", InterfaceType::getInterfaceDesc().name, pluginName ? 
pluginName : "nullptr")); #endif } } return iface; } /** * A helper function for \ref carb::getCachedInterface() that throws on error. * * @tparam InterfaceType The interface to acquire (i.e. `assets::IAssets`). Must be specified and cannot be inferred. * @returns A pointer to the interface. * @throws std::runtime_error if the interface cannot be acquired and exceptions are enabled, otherwise this error * condition results in a \ref CARB_FATAL_UNLESS() assertion. */ template <typename InterfaceType> InterfaceType* getCachedInterfaceForBindings() { InterfaceType* iface = carb::getCachedInterface<InterfaceType>(); if (CARB_UNLIKELY(!iface)) { // somehow this header gets picked up by code compiled by -fno-exceptions #if !CARB_EXCEPTIONS_ENABLED OMNI_FATAL_UNLESS(iface, "Failed to acquire cached interface: '%s'", InterfaceType::getInterfaceDesc().name); #else throw std::runtime_error( fmt::format("Failed to acquire cached interface: {}", InterfaceType::getInterfaceDesc().name)); #endif } return iface; } /** * Helper for \ref Framework::tryAcquireInterfaceFromLibrary() that throws on error. * * @tparam InterfaceType The interface to acquire (i.e. `assets::IAssets`). Must be specified and cannot be inferred. * @param libraryPath The library path to acquire the interface from. Must be specified. May be relative or absolute. * @returns A pointer to the interface. * @throws std::runtime_error if the interface cannot be acquired and exceptions are enabled, otherwise this error * condition results in a \ref CARB_FATAL_UNLESS() assertion. 
*/ template <typename InterfaceType> InterfaceType* acquireInterfaceFromLibraryForBindings(const char* libraryPath) { carb::Framework* framework = carb::getFramework(); InterfaceType* iface = framework->tryAcquireInterfaceFromLibrary<InterfaceType>(libraryPath); if (!iface) { // somehow this header gets picked up by code compiled by -fno-exceptions #if !CARB_EXCEPTIONS_ENABLED OMNI_FATAL_UNLESS( "Failed to acquire interface: '%s' from: '%s')", InterfaceType::getInterfaceDesc().name, libraryPath); #else throw std::runtime_error(fmt::format( "Failed to acquire interface: {} from: {})", InterfaceType::getInterfaceDesc().name, libraryPath)); #endif } return iface; } /** * Acquires the Carbonite Framework for a script binding. * * @note This is automatically called by \ref FrameworkInitializerForBindings::FrameworkInitializerForBindings() from * \ref CARB_BINDINGS(). * * @param scriptLanguage The script language that this binding works with (i.e. "python"). This binding is registered * as via `carb::getFramework()->registerScriptBinding(BindingType::Binding, g_carbClientName, scriptLanguage)`. * @returns A pointer to the Carbonite \ref Framework, or `nullptr` on error (i.e. version mismatch). * @see Framework::registerScriptBinding() */ inline Framework* acquireFrameworkForBindings(const char* scriptLanguage) { // Acquire framework and set into global variable // Is framework was previously invalid, we are the first who calling it and it will be created during acquire. 
// Register builtin plugin in that case const bool firstStart = !isFrameworkValid(); Framework* f = acquireFramework(g_carbClientName); if (!f) return nullptr; g_carbFramework = f; // Register as binding for the given script language f->registerScriptBinding(BindingType::Binding, g_carbClientName, scriptLanguage); // Starting up logging if (firstStart) detail::registerBuiltinLogging(f); logging::registerLoggingForClient(); // Starting up filesystem and profiling if (firstStart) { detail::registerBuiltinFileSystem(f); detail::registerBuiltinAssert(f); detail::registerBuiltinThreadUtil(f); } profiler::registerProfilerForClient(); assert::registerAssertForClient(); l10n::registerLocalizationForClient(); return f; } /** * Releases the Carbonite Framework for a script binding. * * @note This is automatically called by the \ref FrameworkInitializerForBindings destructor from \ref CARB_BINDINGS(). */ inline void releaseFrameworkForBindings() { if (isFrameworkValid()) { profiler::deregisterProfilerForClient(); logging::deregisterLoggingForClient(); assert::deregisterAssertForClient(); l10n::deregisterLocalizationForClient(); // Leave g_carbFramework intact here since the framework itself remains valid; we are just signaling our end of // using it. There may be some static destructors (i.e. CachedInterface) that still need to use it. } else { // The framework became invalid while we were loaded. g_carbFramework = nullptr; } } /** * A helper class used by \ref CARB_BINDINGS() to acquire and release the \ref Framework for a binding. */ class FrameworkInitializerForBindings { public: /** * Acquires the Carbonite \ref Framework for this binding module. * * @note Calls \ref acquireFrameworkForBindings() and \ref OMNI_CORE_START() if the ONI core is not already started. * @param scriptLanguage The script language that this binding works with. 
*/ FrameworkInitializerForBindings(const char* scriptLanguage = "python") { acquireFrameworkForBindings(scriptLanguage); m_thisModuleStartedOmniCore = !omniGetTypeFactoryWithoutAcquire(); if (m_thisModuleStartedOmniCore) { // at this point, the core should already be started by the omniverse host executable (i.e. app). however, // if we're in the python native interpreter, it will not automatically startup the core. here we account // for this situation by checking if the core is started, and if not, start it. // // OMNI_CORE_START internally reference counts the start/stop calls, so one would think we could always make // this call (with a corresponding call to OMNI_CORE_STOP in the destructor). // // however, the Python interpreter doesn't like unloading .pyd files, meaning our destructor will not be // called. // // this shouldn't be an issue, unless the host expects to be able to load, unload, and then reload the core. // the result here would be the internal reference count would get confused, causing the core to never be // unloaded. // // we don't expect apps to reload the core, but our unit tests do. so, here we only let python increment // the ref count if we think its the first entity to start the core (i.e. running in the interpreter). OMNI_CORE_START(nullptr); } omni::structuredlog::addModulesSchemas(); } /** * Releases the Carbonite \ref Framework for this binding module. * @note Calls \ref OMNI_CORE_STOP() if the constructor initialized the ONI core, and * \ref releaseFrameworkForBindings(). */ ~FrameworkInitializerForBindings() { if (m_thisModuleStartedOmniCore) { OMNI_CORE_STOP_FOR_BINDINGS(); m_thisModuleStartedOmniCore = false; } releaseFrameworkForBindings(); } //! A boolean indicating whether the constructor called \ref OMNI_CORE_START(). bool m_thisModuleStartedOmniCore; }; /** * A helper function for combining two hash values. 
* * Effectively: * ```cpp * std::size_t res = 0; * using std::hash; * res = carb::hashCombine(res, hash<T1>{}(t1)); * res = carb::hashCombine(res, hash<T2>{}(t2)); * return res; * ``` * @tparam T1 A type to hash. * @tparam T2 A type to hash. * @param t1 A value to hash. * @param t2 A value to hash. * @returns A hash combined from @p t1 and @p t2. * @see hashCombine() */ template <class T1, class T2> inline size_t hashPair(T1 t1, T2 t2) { std::size_t res = 0; using std::hash; res = carb::hashCombine(res, hash<T1>{}(t1)); res = carb::hashCombine(res, hash<T2>{}(t2)); return res; } #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Helper class to store and manage lifetime of `std::function<>` for script bindings. * * It allocates std::function copy on a heap, prolonging its lifetime. That allows passing std::function as user data * into interface subscription functions. You need to associate it with some key (provide key type with a template * KeyT), usually it is some kind of Subscription Id. When unsubscribing call ScriptCallbackRegistry::removeAndDestroy * with corresponding key. The usage: * * ```cpp * std::function<int(float, char)> myFunc; * static ScriptCallbackRegistry<size_t, int, float, char> s_registry; * std::function<int(float, char)>* myFuncCopy = s_registry.create(myFunc); * // myFuncCopy can now passed into C API as user data * s_registry.add(id, myFuncCopy); * // ... * s_registry.removeAndDestroy(id); * ``` */ template <class KeyT, typename ReturnT, typename... 
Args> class ScriptCallbackRegistry { public: using FuncT = std::function<ReturnT(Args...)>; static FuncT* create(const FuncT& f) { return new FuncT(f); } static void destroy(FuncT* f) { delete f; } void add(const KeyT& key, FuncT* ptr) { if (!m_map.insert({ key, ptr }).second) { CARB_LOG_ERROR("Scripting callback with that key already exists."); } } bool tryRemoveAndDestroy(const KeyT& key) { auto it = m_map.find(key); if (it != m_map.end()) { destroy(it->second); m_map.erase(it); return true; } return false; } void removeAndDestroy(const KeyT& key) { if (!tryRemoveAndDestroy(key)) { CARB_LOG_ERROR("Removing unknown scripting callback."); } } private: std::unordered_map<KeyT, FuncT*> m_map; }; template <typename ClassT, typename ObjectT, typename... Args> auto wrapInStealObject(ObjectT* (ClassT::*f)(Args...)) { return [f](ClassT* c, Args... args) { return carb::stealObject<ObjectT>((c->*f)(args...)); }; } #endif } // namespace carb /** * Declare a compilation unit as script language bindings. * * @param clientName The string to pass to CARB_GLOBALS which will be used as `g_carbClientName` for the module. * @param ... Arguments passed to \ref carb::FrameworkInitializerForBindings::FrameworkInitializerForBindings(), * typically the script language. */ #define CARB_BINDINGS(clientName, ...) \ CARB_GLOBALS(clientName) \ carb::FrameworkInitializerForBindings g_carbFrameworkInitializerForBindings{ __VA_ARGS__ }; /** * Declare a compilation unit as script language bindings. * * @param clientName_ The string to pass to CARB_GLOBALS_EX which will be used as `g_carbClientName` for the module. * @param desc_ The description passed to `omni::LogChannel` for the default log channel. * @param ... Arguments passed to \ref carb::FrameworkInitializerForBindings::FrameworkInitializerForBindings(), * typically the script language. */ #define CARB_BINDINGS_EX(clientName_, desc_, ...) 
\ CARB_GLOBALS_EX(clientName_, desc_) \ carb::FrameworkInitializerForBindings g_carbFrameworkInitializerForBindings{ __VA_ARGS__ };
omniverse-code/kit/include/carb/InterfaceUtils.h
// Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief Utilities for Carbonite Interface management.
#pragma once

#include "Framework.h"
#include "cpp/Atomic.h"

namespace carb
{

#ifndef DOXYGEN_BUILD
namespace detail
{

// Thread-safe, lazily-populated cache for a single (InterfaceT, PluginName) pair.
// A relaxed atomic load is the fast path in get(); the slow path (getInternal) runs a small
// state machine (NotRequested -> Requesting -> Finished) over m_reqState so that exactly one
// thread performs tryAcquireInterface() while concurrent callers block on an atomic wait.
// Release hooks registered with the Framework clear the cache when the interface (or the
// whole Framework) goes away.
template <typename InterfaceT, const char* PluginName>
class CachedInterface
{
public:
    constexpr CachedInterface() = default;
    ~CachedInterface()
    {
        reset();
    }

    // Returns the cached interface pointer, acquiring and caching it on first use.
    // Returns nullptr if the interface (or the Framework) is unavailable.
    InterfaceT* get()
    {
        auto iface = m_cachedInterface.load(std::memory_order_relaxed);
        if (CARB_LIKELY(iface))
        {
            return iface;
        }
        return getInternal();
    }

    // Clears the cached pointer, unregisters both release hooks, and returns the cache to
    // NotRequested so a later get() will attempt acquisition again. Waiters are notified.
    void reset()
    {
        ::carb::Framework* framework = ::carb::getFramework();
        if (!framework)
        {
            // Framework no longer valid or already unloaded.
            return;
        }

        auto iface = m_cachedInterface.exchange(nullptr, std::memory_order_relaxed);
        if (iface)
        {
            framework->removeReleaseHook(iface, sReleaseHook, this);
        }
        framework->removeReleaseHook(nullptr, sFrameworkReleased, this);
        m_reqState.store(NotRequested, std::memory_order_release);
        m_reqState.notify_all();
    }

private:
    enum RequestState
    {
        NotRequested, // no acquisition attempted yet (or the cache was reset)
        Requesting,   // one thread is acquiring; other callers wait on m_reqState
        Finished,     // acquisition succeeded; m_cachedInterface holds the pointer
    };
    std::atomic<InterfaceT*> m_cachedInterface{ nullptr };
    carb::cpp::atomic<RequestState> m_reqState{ NotRequested };

    // Trampoline: Framework release hook for the cached interface itself.
    static void sReleaseHook(void* iface, void* this_)
    {
        static_cast<CachedInterface*>(this_)->releaseHook(iface);
    }
    // Trampoline: Framework release hook for the Framework (nullptr hook target).
    static void sFrameworkReleased(void*, void* this_)
    {
        // The Framework is fully released. Reset our request state.
        static_cast<CachedInterface*>(this_)->reset();
    }
    void releaseHook(void* iface)
    {
        // Clear the cached interface pointer, but don't fully reset. Further attempts to get() will proceed to
        // getInternal(), but will not attempt to acquire the interface again.
        CARB_ASSERT(iface == m_cachedInterface);
        CARB_UNUSED(iface);
        m_cachedInterface.store(nullptr, std::memory_order_relaxed);
    }

    // Slow path of get(): acquire the interface once, publishing it for all callers.
    CARB_NOINLINE InterfaceT* getInternal()
    {
        ::carb::Framework* framework = ::carb::getFramework();
        if (!framework)
        {
            return nullptr;
        }

        RequestState state = m_reqState.load(std::memory_order_acquire);
        while (state != Finished)
        {
            // Try to claim the Requesting slot; only one thread wins the CAS.
            if (state == NotRequested &&
                m_reqState.compare_exchange_weak(
                    state, Requesting, std::memory_order_relaxed, std::memory_order_relaxed))
            {
                InterfaceT* iface = framework->tryAcquireInterface<InterfaceT>(PluginName);
                if (!iface)
                {
                    // Failed to acquire. Reset to initial state
                    m_reqState.store(NotRequested, std::memory_order_release);
                    m_reqState.notify_all();
                    return nullptr;
                }
                if (CARB_UNLIKELY(!framework->addReleaseHook(iface, sReleaseHook, this)))
                {
                    // This could only happen if something released the interface between us acquiring it and adding
                    // the release hook. Repeat the process again.
                    state = NotRequested;
                    m_reqState.store(state, std::memory_order_release);
                    m_reqState.notify_all();
                    continue;
                }

                bool b = framework->addReleaseHook(nullptr, sFrameworkReleased, this);
                CARB_UNUSED(b);
                CARB_ASSERT(b);
                // Publish the pointer before flipping the state to Finished (release store),
                // so waiters that observe Finished also observe the pointer.
                m_cachedInterface.store(iface, std::memory_order_relaxed);
                m_reqState.store(Finished, std::memory_order_release);
                m_reqState.notify_all();
                return iface;
            }
            else if (state == Requesting)
            {
                // Another thread is acquiring; block until the state changes, then re-check.
                m_reqState.wait(state, std::memory_order_relaxed);
                state = m_reqState.load(std::memory_order_acquire);
            }
        }
        return m_cachedInterface.load(std::memory_order_relaxed);
    }
};

// Returns the per-(type, plugin-name) singleton cache (function-local static).
template <class T, const char* PluginName>
CachedInterface<T, PluginName>& cachedInterface()
{
    static CachedInterface<T, PluginName> cached;
    return cached;
}

} // namespace detail
#endif

/**
 * Retrieves the specified interface as if from Framework::tryAcquireInterface() and caches it for fast retrieval.
 *
 * If the interface is released with Framework::releaseInterface(), the cached interface will be automatically
 * cleared. Calls to getCachedInterface() after this point will return `nullptr`. In order for getCachedInterface() to
 * call Framework::tryAcquireInterface() again, first call resetCachedInterface().
 *
 * @note Releasing the Carbonite Framework with carb::releaseFramework() automatically calls resetCachedInterface().
 *
 * @tparam InterfaceT The interface class to retrieve.
 * @tparam PluginName The name of a specific plugin to keep cached. Note: this must be a global char array or `nullptr`.
 * @returns The loaded and acquired interface class if successfully acquired through Framework::tryAcquireInterface(),
 * or a previously cached value. If the interface could not be found, or has been released with releaseFramework(),
 * `nullptr` is returned.
 */
template <typename InterfaceT, const char* PluginName = nullptr>
CARB_NODISCARD inline InterfaceT* getCachedInterface()
{
    return ::carb::detail::cachedInterface<InterfaceT, PluginName>().get();
}

/**
 * Resets any previously-cached interface of the given type and allows it to be acquired again.
 *
 * @note This does NOT *release* the interface as if Framework::releaseInterface() were called. It merely resets the
 * cached state so that getCachedInterface() will call Framework::tryAcquireInterface() again.
 *
 * @tparam InterfaceT The type of interface class to evict from cache.
 * @tparam PluginName The name of a specific plugin that is cached. Note: this must be a global char array or `nullptr`.
 */
template <typename InterfaceT, const char* PluginName = nullptr>
inline void resetCachedInterface()
{
    ::carb::detail::cachedInterface<InterfaceT, PluginName>().reset();
}

} // namespace carb
omniverse-code/kit/include/carb/CarbWindows.h
// Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // /** * @file * @brief Allow access to specific parts of Windows API without including Windows.h * * This file replaces `#include <Windows.h>` for header files. *Windows.h* is monolithic and defines some oft-used * names like `min`, `max`, `count`, etc. Directives like `NOMINMAX`, `WIN32_LEAN_AND_MEAN`, and `WIN32_EXTRA_LEAN` can * help, but still leave the global namespace much more polluted than is desired. Note that the MSVC CRT does not * include *Windows.h* anywhere; instead special reserved naming conventions (such as `_PrefixedUpper`) exist to allow * the CRT to define special functions that are called by the CRT, but the implementation of those functions * (that utilize the Windows API) are hidden away in static or dynamic libraries. However, for Carbonite, since the * goal is to not require additional linking for header utilities, there is no ability for `carb` header files to * similarly hide Windows API usage in libraries. * * Anything from the Windows API that Carbonite relies upon should be defined in this file, and should not conflict in * the event that *Windows.h* is included. * * Rules for adding things to this file: * 1. Do not `#define` anything that is defined in *Windows.h*. Instead, prefix the macro name with `CARBWIN_` but give * it the same value. This is to prevent errors if *Windows.h* is included since macros may not be defined more than * once. * 2. Typedef's should be specified _exactly_ as in Windows.h * 3. Structs can be forward-declared only, otherwise definitions will conflict with *Windows.h*. 
If a struct * definition is required, it should be prefixed with `CARBWIN_` at the bottom of this file. *TestCarbWindows.cpp* * should then `static_assert` that the size and member offsets are the same as the *Windows.h* version. * 4. Enums cannot be redefined. Therefore, enum values needed should be changed to `#define` and prefixed with * `CARBWIN_`. * * These rules will allow compilation units to `#include <Windows.h>` before or after this file. */ #pragma once #include "Defines.h" CARB_IGNOREWARNING_MSC_WITH_PUSH(4201) // nonstandard extension used: nameless struct/union // clang-format off #if CARB_PLATFORM_WINDOWS && !defined(DOXYGEN_BUILD) #ifndef __cplusplus #define CARBWIN_NONAMELESSUNION // use strict ANSI standard #else extern "C" { #endif // Define these temporarily so that they don't conflict with Windows.h. They are #undef'd at the bottom #define CARBWIN_WINBASEAPI __declspec(dllimport) #ifndef WINBASEAPI #define WINBASEAPI CARBWIN_WINBASEAPI #define CARBWIN_WINBASEAPI_DEFINED 1 #endif #define CARBWIN_WINAPI __stdcall #ifndef WINAPI #define WINAPI CARBWIN_WINAPI #define CARBWIN_WINAPI_DEFINED 1 #endif #define CARBWIN_SHSTDAPI extern "C" __declspec(dllimport) HRESULT WINAPI #define CARBWIN_SHSTDAPI_(type) extern "C" __declspec(dllimport) type WINAPI #ifndef SHSTDAPI #define SHSTDAPI CARBWIN_SHSTDAPI #define SHSTDAPI_(type) CARBWIN_SHSTDAPI_(type) #define CARBWIN_SHSTDAPI_DEFINED 1 #endif #define CARBWIN_APIENTRY __stdcall #ifndef APIENTRY #define APIENTRY CARBWIN_APIENTRY #define CARBWIN_APIENTRY_DEFINED 1 #endif #define CARBWIN_WINADVAPI __declspec(dllimport) #ifndef WINADVAPI #define WINADVAPI CARBWIN_WINADVAPI #define CARBWIN_WINADVAPI_DEFINED 1 #endif /////////////////////////////////////////////////////////////////////////////// // #defines. Should be prefixed with CARBWIN_ and defined exactly the same as // their Windows.h counterpart. 
// from minwindef.h #define CARBWIN_CONST const #define CARBWIN_FALSE 0 #define CARBWIN_TRUE 1 #define CARBWIN_MAX_PATH 260 // from winnt.h #ifndef CARBWIN_DUMMYUNIONNAME #if defined(CARBWIN_NONAMELESSUNION) || !defined(_MSC_EXTENSIONS) #define CARBWIN_DUMMYUNIONNAME u #else #define CARBWIN_DUMMYUNIONNAME #endif #endif #define CARBWIN_STATUS_SUCCESS ((DWORD )0x00000000L) #define CARBWIN_STATUS_TIMEOUT ((DWORD )0x00000102L) #define CARBWIN_MEM_COMMIT 0x00001000 #define CARBWIN_MEM_RESERVE 0x00002000 #define CARBWIN_MEM_DECOMMIT 0x00004000 #define CARBWIN_MEM_RELEASE 0x00008000 #define CARBWIN_MEM_FREE 0x00010000 #define CARBWIN_MEM_PRIVATE 0x00020000 #define CARBWIN_MEM_MAPPED 0x00040000 #define CARBWIN_MEM_RESET 0x00080000 #define CARBWIN_MEM_TOP_DOWN 0x00100000 #define CARBWIN_MEM_WRITE_WATCH 0x00200000 #define CARBWIN_MEM_PHYSICAL 0x00400000 #define CARBWIN_MEM_LARGE_PAGES 0x20000000 #define CARBWIN_MEM_4MB_PAGES 0x80000000 #define CARBWIN_TOKEN_QUERY 0x0008 #ifndef CARBWIN_DUMMYSTRUCTNAME #if defined(CARBWIN_NONAMELESSUNION) || !defined(_MSC_EXTENSIONS) #define CARBWIN_DUMMYSTRUCTNAME s #else #define CARBWIN_DUMMYSTRUCTNAME #endif #endif #define CARBWIN_VOID void #define CARBWIN_DLL_PROCESS_ATTACH 1 #define CARBWIN_DLL_THREAD_ATTACH 2 #define CARBWIN_DLL_THREAD_DETACH 3 #define CARBWIN_DLL_PROCESS_DETACH 0 #define CARBWIN_VOID void #define CARBWIN_STATUS_WAIT_0 ((DWORD)0x00000000L) #define CARBWIN_RTL_SRWLOCK_INIT {0} #define CARBWIN_MAKELANGID(p, s) ((((WORD )(s)) << 10) | (WORD )(p)) #define CARBWIN_MAKELCID(lgid, srtid) ((DWORD)((((DWORD)((WORD)(srtid))) << 16) | ((DWORD)((WORD)(lgid))))) #define CARBWIN_LANG_NEUTRAL 0x00 #define CARBWIN_LANG_INVARIANT 0x7f #define CARBWIN_SUBLANG_DEFAULT 0x01 #define CARBWIN_SUBLANG_NEUTRAL 0x00 #define CARBWIN_SORT_DEFAULT 0x0 #define CARBWIN_PAGE_READONLY 0x02 #define CARBWIN_PAGE_READWRITE 0x04 #define CARBWIN_STANDARD_RIGHTS_REQUIRED (0x000F0000L) #define CARBWIN_SECTION_QUERY 0x0001 #define CARBWIN_SECTION_MAP_WRITE 
0x0002 #define CARBWIN_SECTION_MAP_READ 0x0004 #define CARBWIN_SECTION_MAP_EXECUTE 0x0008 #define CARBWIN_SECTION_EXTEND_SIZE 0x0010 #define CARBWIN_SECTION_MAP_EXECUTE_EXPLICIT 0x0020 #define CARBWIN_SECTION_ALL_ACCESS (CARBWIN_STANDARD_RIGHTS_REQUIRED|CARBWIN_SECTION_QUERY|\ CARBWIN_SECTION_MAP_WRITE | \ CARBWIN_SECTION_MAP_READ | \ CARBWIN_SECTION_MAP_EXECUTE | \ CARBWIN_SECTION_EXTEND_SIZE) #define CARBWIN_LOCALE_INVARIANT \ (CARBWIN_MAKELCID(CARBWIN_MAKELANGID(CARBWIN_LANG_INVARIANT, CARBWIN_SUBLANG_NEUTRAL), CARBWIN_SORT_DEFAULT)) #define CARBWIN_LCMAP_LOWERCASE 0x00000100 #define CARBWIN_LCMAP_UPPERCASE 0x00000200 #define CARBWIN_LCMAP_TITLECASE 0x00000300 #define CARBWIN_LCMAP_SORTKEY 0x00000400 #define CARBWIN_LCMAP_BYTEREV 0x00000800 #define CARBWIN_LCMAP_HIRAGANA 0x00100000 #define CARBWIN_LCMAP_KATAKANA 0x00200000 #define CARBWIN_LCMAP_HALFWIDTH 0x00400000 #define CARBWIN_LCMAP_FULLWIDTH 0x00800000 #define CARBWIN_LCMAP_LINGUISTIC_CASING 0x01000000 #define CARBWIN_LCMAP_SIMPLIFIED_CHINESE 0x02000000 #define CARBWIN_LCMAP_TRADITIONAL_CHINESE 0x04000000 #define CARBWIN_LCMAP_SORTHANDLE 0x20000000 #define CARBWIN_LCMAP_HASH 0x00040000 #define CARBWIN_FILE_SHARE_READ 0x00000001 #define CARBWIN_FILE_SHARE_WRITE 0x00000002 #define CARBWIN_FILE_SHARE_DELETE 0x00000004 #define CARBWIN_GENERIC_READ (0x80000000L) #define CARBWIN_GENERIC_WRITE (0x40000000L) #define CARBWIN_GENERIC_EXECUTE (0x20000000L) #define CARBWIN_GENERIC_ALL (0x10000000L) #define CARBWIN_EXCEPTION_NONCONTINUABLE 0x1 #define CARBWIN_EVENTLOG_SEQUENTIAL_READ 0x0001 #define CARBWIN_EVENTLOG_SEEK_READ 0x0002 #define CARBWIN_EVENTLOG_FORWARDS_READ 0x0004 #define CARBWIN_EVENTLOG_BACKWARDS_READ 0x0008 #define CARBWIN_EVENTLOG_SUCCESS 0x0000 #define CARBWIN_EVENTLOG_ERROR_TYPE 0x0001 #define CARBWIN_EVENTLOG_WARNING_TYPE 0x0002 #define CARBWIN_EVENTLOG_INFORMATION_TYPE 0x0004 #define CARBWIN_EVENTLOG_AUDIT_SUCCESS 0x0008 #define CARBWIN_EVENTLOG_AUDIT_FAILURE 0x0010 #define 
CARBWIN_EVENTLOG_START_PAIRED_EVENT 0x0001 #define CARBWIN_EVENTLOG_END_PAIRED_EVENT 0x0002 #define CARBWIN_EVENTLOG_END_ALL_PAIRED_EVENTS 0x0004 #define CARBWIN_EVENTLOG_PAIRED_EVENT_ACTIVE 0x0008 #define CARBWIN_EVENTLOG_PAIRED_EVENT_INACTIVE 0x0010 #define CARBWIN_FILE_ATTRIBUTE_READONLY 0x00000001 #define CARBWIN_FILE_ATTRIBUTE_HIDDEN 0x00000002 #define CARBWIN_FILE_ATTRIBUTE_SYSTEM 0x00000004 #define CARBWIN_FILE_ATTRIBUTE_DIRECTORY 0x00000010 #define CARBWIN_FILE_ATTRIBUTE_ARCHIVE 0x00000020 #define CARBWIN_FILE_ATTRIBUTE_DEVICE 0x00000040 #define CARBWIN_FILE_ATTRIBUTE_TEMPORARY 0x00000100 #define CARBWIN_FILE_ATTRIBUTE_SPARSE_FILE 0x00000200 #define CARBWIN_FILE_ATTRIBUTE_REPARSE_POINT 0x00000400 #define CARBWIN_FILE_ATTRIBUTE_COMPRESSED 0x00000800 #define CARBWIN_FILE_ATTRIBUTE_OFFLINE 0x00001000 #define CARBWIN_FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x00002000 #define CARBWIN_FILE_ATTRIBUTE_ENCRYPTED 0x00004000 #define CARBWIN_FILE_ATTRIBUTE_INTEGRITY_STREAM 0x00008000 #define CARBWIN_FILE_ATTRIBUTE_VIRTUAL 0x00010000 #define CARBWIN_FILE_ATTRIBUTE_NO_SCRUB_DATA 0x00020000 #define CARBWIN_FILE_ATTRIBUTE_EA 0x00040000 #define CARBWIN_FILE_ATTRIBUTE_PINNED 0x00080000 #define CARBWIN_FILE_ATTRIBUTE_UNPINNED 0x00100000 #define CARBWIN_FILE_ATTRIBUTE_RECALL_ON_OPEN 0x00040000 #define CARBWIN_FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS 0x00400000 #define CARBWIN_FILE_ATTRIBUTE_STRICTLY_SEQUENTIAL 0x20000000 // from intsafe.h #define CARBWIN_S_OK ((HRESULT)0L) // from handleapi.h #define CARBWIN_INVALID_HANDLE_VALUE ((HANDLE)(LONG_PTR)-1) // from winbase.h #define CARBWIN_WAIT_OBJECT_0 ((CARBWIN_STATUS_WAIT_0) + 0) #define CARBWIN_INFINITE 0xFFFFFFFF #define CARBWIN_FORMAT_MESSAGE_ALLOCATE_BUFFER 0x00000100 #define CARBWIN_FORMAT_MESSAGE_IGNORE_INSERTS 0x00000200 #define CARBWIN_FORMAT_MESSAGE_FROM_SYSTEM 0x00001000 #define CARBWIN_FIBER_FLAG_FLOAT_SWITCH 0x1 // context switch floating point #define CARBWIN_FILE_ATTRIBUTE_NORMAL 0x80 #define 
CARBWIN_FILE_FLAG_BACKUP_SEMANTICS 0x02000000 // from winerror.h #define CARBWIN_ERROR_SUCCESS 0L #define CARBWIN_ERROR_FILE_NOT_FOUND 2L #define CARBWIN_ERROR_PATH_NOT_FOUND 3L #define CARBWIN_ERROR_ACCESS_DENIED 5L #define CARBWIN_ERROR_OUTOFMEMORY 14L #define CARBWIN_ERROR_NO_MORE_FILES 18L #define CARBWIN_ERROR_FILE_EXISTS 80L #define CARBWIN_ERROR_CALL_NOT_IMPLEMENTED 120L #define CARBWIN_ERROR_INSUFFICIENT_BUFFER 122L #define CARBWIN_ERROR_MOD_NOT_FOUND 126L #define CARBWIN_ERROR_ALREADY_EXISTS 183L #define CARBWIN_ERROR_FILENAME_EXCED_RANGE 206L #define CARBWIN_WAIT_TIMEOUT 258L #define CARBWIN_ERROR_NO_MORE_ITEMS 259L #define CARBWIN_ERROR_TIMEOUT 1460L #define CARBWIN_SUCCEEDED(hr) (((HRESULT)(hr)) >= 0) #define CARBWIN_FAILED(hr) (((HRESULT)(hr)) < 0) // from synchapi.h #define CARBWIN_SRWLOCK_INIT CARBWIN_RTL_SRWLOCK_INIT // from memoryapi.h #define CARBWIN_FILE_MAP_READ CARBWIN_SECTION_MAP_READ #define CARBWIN_FILE_MAP_ALL_ACCESS CARBWIN_SECTION_ALL_ACCESS // from fileapi.h #define CARBWIN_CREATE_NEW 1 #define CARBWIN_CREATE_ALWAYS 2 #define CARBWIN_OPEN_EXISTING 3 #define CARBWIN_OPEN_ALWAYS 4 #define CARBWIN_TRUNCATE_EXISTING 5 #define CARBWIN_INVALID_FILE_ATTRIBUTES ((DWORD)-1) // from minwinbase.h #define CARBWIN_LOCKFILE_FAIL_IMMEDIATELY 0x00000001 #define CARBWIN_LOCKFILE_EXCLUSIVE_LOCK 0x00000002 // from processthreadsapi.h #define CARBWIN_TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF) // from pathcch.h #define CARBWIN_PATHCCH_ALLOW_LONG_PATHS 0x01 // from excpt.h #define CARBWIN_EXCEPTION_EXECUTE_HANDLER 1 /////////////////////////////////////////////////////////////////////////////// // typedefs, forward-declarations // Many of the typedefs below are not compatible with Forge's own version of the Windows typedefs. // These are more correct as they're declared exactly the same as in the Windows headers. 
#ifndef NV_FORGE_WINDEF_H // from basetsd.h static_assert(sizeof(void*) == 8, "This only supports 64-bit platforms"); typedef unsigned int UINT32, *PUINT32; typedef __int64 INT_PTR, *PINT_PTR; typedef __int64 LONG_PTR, *PLONG_PTR; typedef unsigned __int64 UINT_PTR, *PUINT_PTR; typedef unsigned __int64 ULONG_PTR, *PULONG_PTR; typedef ULONG_PTR SIZE_T, *PSIZE_T; typedef LONG_PTR SSIZE_T, *PSSIZE_T; typedef ULONG_PTR DWORD_PTR, *PDWORD_PTR; typedef unsigned __int64 DWORD64, *PDWORD64; // from WTypesbase.h typedef wchar_t WCHAR; typedef const WCHAR *LPCWSTR; // from minwindef.h typedef int BOOL; typedef unsigned char BYTE; typedef long *LPLONG; typedef unsigned long DWORD; typedef DWORD *LPDWORD; typedef unsigned long ULONG, *PULONG; typedef void *LPVOID; typedef const void *LPCVOID; typedef void *HANDLE; typedef HANDLE HLOCAL; typedef HANDLE *PHANDLE, *LPHANDLE; typedef struct HINSTANCE__ *HINSTANCE; typedef HINSTANCE HMODULE; typedef INT_PTR (WINAPI *FARPROC)(); typedef struct _FILETIME FILETIME, *PFILETIME, *LPFILETIME; typedef struct _SYSTEMTIME SYSTEMTIME, *PSYSTEMTIME, *LPSYSTEMTIME; typedef int INT; typedef unsigned int UINT; typedef unsigned int *PUINT; // from minwinbase.h typedef struct _SECURITY_ATTRIBUTES _SECURITY_ATTRIBUTES, *PSECURITY_ATTRIBUTES, *LPSECURITY_ATTRIBUTES; typedef DWORD *PDWORD; // from winnt.h typedef void *PVOID; typedef long LONG; typedef long HRESULT; typedef char CHAR; typedef wchar_t WCHAR; typedef BYTE BOOLEAN; typedef unsigned short WORD; typedef CHAR *LPSTR; typedef const CHAR *LPCSTR, *PCSTR; typedef WCHAR *NWPSTR, *LPWSTR, *PWSTR; typedef const WCHAR *LPCWSTR, *PCWSTR; typedef WCHAR *PWCHAR, *LPWCH, *PWCH; typedef __int64 LONGLONG; typedef unsigned __int64 ULONGLONG; typedef ULONGLONG DWORDLONG; typedef struct _RTL_SRWLOCK RTL_SRWLOCK, *PRTL_SRWLOCK; typedef DWORD LCID; typedef PDWORD PLCID; typedef WORD LANGID; typedef union _LARGE_INTEGER LARGE_INTEGER; typedef LARGE_INTEGER *PLARGE_INTEGER; typedef struct _EVENTLOGRECORD 
EVENTLOGRECORD, *PEVENTLOGRECORD; /////////////////////////////////////////////////////////////////////////////// // Struct redefines // See instructions for adding at the bottom of this block. struct CARBWIN_SRWLOCK { PVOID Ptr; }; struct CARBWIN_PROCESS_MEMORY_COUNTERS { DWORD cb; DWORD PageFaultCount; SIZE_T PeakWorkingSetSize; SIZE_T WorkingSetSize; SIZE_T QuotaPeakPagedPoolUsage; SIZE_T QuotaPagedPoolUsage; SIZE_T QuotaPeakNonPagedPoolUsage; SIZE_T QuotaNonPagedPoolUsage; SIZE_T PagefileUsage; SIZE_T PeakPagefileUsage; }; struct CARBWIN_MEMORYSTATUSEX { DWORD dwLength; DWORD dwMemoryLoad; DWORDLONG ullTotalPhys; DWORDLONG ullAvailPhys; DWORDLONG ullTotalPageFile; DWORDLONG ullAvailPageFile; DWORDLONG ullTotalVirtual; DWORDLONG ullAvailVirtual; DWORDLONG ullAvailExtendedVirtual; }; struct CARBWIN_SYSTEM_INFO { union { DWORD dwOemId; // Obsolete field...do not use struct { WORD wProcessorArchitecture; WORD wReserved; } CARBWIN_DUMMYSTRUCTNAME; } CARBWIN_DUMMYUNIONNAME; DWORD dwPageSize; LPVOID lpMinimumApplicationAddress; LPVOID lpMaximumApplicationAddress; DWORD_PTR dwActiveProcessorMask; DWORD dwNumberOfProcessors; DWORD dwProcessorType; DWORD dwAllocationGranularity; WORD wProcessorLevel; WORD wProcessorRevision; }; struct CARBWIN_OVERLAPPED { ULONG_PTR Internal; ULONG_PTR InternalHigh; union { struct { DWORD Offset; DWORD OffsetHigh; } CARBWIN_DUMMYSTRUCTNAME; PVOID Pointer; } CARBWIN_DUMMYUNIONNAME; HANDLE hEvent; }; struct _OVERLAPPED; typedef struct _OVERLAPPED* LPOVERLAPPED; struct CARBWIN_FILE_NOTIFY_INFORMATION { DWORD NextEntryOffset; DWORD Action; DWORD FileNameLength; WCHAR FileName[1]; }; struct CARBWIN_FILETIME { DWORD dwLowDateTime; DWORD dwHighDateTime; }; struct CARBWIN_SYSTEMTIME { WORD wYear; WORD wMonth; WORD wDayOfWeek; WORD wDay; WORD wHour; WORD wMinute; WORD wSecond; WORD wMilliseconds; }; typedef union CARBWIN_LARGE_INTEGER { struct { DWORD LowPart; LONG HighPart; } CARBWIN_DUMMYSTRUCTNAME; struct { DWORD LowPart; LONG HighPart; } u; 
LONGLONG QuadPart; } CARBWIN_LARGE_INTEGER; struct CARBWIN_EVENTLOGRECORD { DWORD Length; // Length of full record DWORD Reserved; // Used by the service DWORD RecordNumber; // Absolute record number DWORD TimeGenerated; // Seconds since 1-1-1970 DWORD TimeWritten; // Seconds since 1-1-1970 DWORD EventID; WORD EventType; WORD NumStrings; WORD EventCategory; WORD ReservedFlags; // For use with paired events (auditing) DWORD ClosingRecordNumber; // For use with paired events (auditing) DWORD StringOffset; // Offset from beginning of record DWORD UserSidLength; DWORD UserSidOffset; DWORD DataLength; DWORD DataOffset; // Offset from beginning of record // // Then follow: // // WCHAR SourceName[] // WCHAR Computername[] // SID UserSid // WCHAR Strings[] // BYTE Data[] // CHAR Pad[] // DWORD Length; // }; struct CARBWIN_PROCESSOR_NUMBER { WORD Group; BYTE Number; BYTE Reserved; }; typedef ULONG_PTR CARBWIN_KAFFINITY; struct CARBWIN_GROUP_AFFINITY { CARBWIN_KAFFINITY Mask; WORD Group; WORD Reserved[3]; }; // ADD NEW STRUCT REDEFINES HERE // - add to TestCarbWindows.cpp // - add forward-declared typedefs exactly for Windows types above // - Must be prefixed with CARBWIN_ and defined exactly as in Windows.h /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// // functions // from winbase.h typedef void (WINAPI *PFIBER_START_ROUTINE)(LPVOID lpFiberParameter); typedef PFIBER_START_ROUTINE LPFIBER_START_ROUTINE; WINBASEAPI HLOCAL WINAPI LocalFree(HLOCAL hMem); WINBASEAPI DWORD WINAPI FormatMessageW(DWORD dwFlags, LPCVOID lpSource, DWORD dwMessageId, DWORD dwLanguageId, LPWSTR lpBuffer, DWORD nSize, va_list *Arguments); WINBASEAPI int WINAPI lstrlenW(LPCWSTR lpString); WINBASEAPI LPVOID WINAPI CreateFiber(SIZE_T dwStackSize, LPFIBER_START_ROUTINE lpStartAddress, LPVOID lpParameter); WINBASEAPI LPVOID WINAPI CreateFiberEx(SIZE_T dwStackCommitSize, SIZE_T dwStackReserveSize, 
DWORD dwFlags, LPFIBER_START_ROUTINE lpStartAddress, LPVOID lpParameter); WINBASEAPI void WINAPI DeleteFiber(LPVOID lpFiber); WINBASEAPI void WINAPI SwitchToFiber(LPVOID lpFiber); WINBASEAPI LPVOID WINAPI ConvertThreadToFiber(LPVOID lpParameter); WINBASEAPI LPVOID WINAPI ConvertThreadToFiberEx(LPVOID lpParameter, DWORD dwFlags); WINBASEAPI BOOL WINAPI ConvertFiberToThread(); WINBASEAPI DWORD_PTR WINAPI SetThreadAffinityMask(HANDLE hThread, DWORD_PTR dwThreadAffinityMask); WINADVAPI HANDLE WINAPI OpenEventLogW(LPCWSTR lpUNCServerName, LPCWSTR lpSourceName); WINADVAPI BOOL WINAPI CloseEventLog(HANDLE hEventLog); WINADVAPI BOOL WINAPI ReadEventLogW(HANDLE hEventLog, DWORD dwReadFlags, DWORD dwRecordOffset, LPVOID lpBuffer, DWORD nNumberOfBytesToRead, DWORD *pnBytesRead, DWORD *pnMinNumberOfBytesNeeded); WINADVAPI BOOL WINAPI OpenProcessToken(LPVOID, DWORD, PHANDLE); // from debugapi.h WINBASEAPI BOOL WINAPI IsDebuggerPresent(void); WINBASEAPI void WINAPI DebugBreak(void); WINBASEAPI void WINAPI OutputDebugStringA(LPCSTR lpOutputString); // from synchapi.h typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK; WINBASEAPI HANDLE WINAPI CreateSemaphoreW(LPSECURITY_ATTRIBUTES lpSemaphoreAttributes, LONG lInitialCount, LONG lMaximumCount, LPCWSTR lpName); WINBASEAPI BOOL WINAPI ReleaseSemaphore(HANDLE hSemaphore, LONG lReleaseCount, LPLONG lpPreviousCount); WINBASEAPI DWORD WINAPI WaitForSingleObject(HANDLE hHandle, DWORD dwMilliseconds); WINBASEAPI void WINAPI InitializeSRWLock(PSRWLOCK SRWLock); WINBASEAPI void WINAPI ReleaseSRWLockExclusive(PSRWLOCK SRWLock); WINBASEAPI void WINAPI ReleaseSRWLockShared(PSRWLOCK SRWLock); WINBASEAPI void WINAPI AcquireSRWLockExclusive(PSRWLOCK SRWLock); WINBASEAPI void WINAPI AcquireSRWLockShared(PSRWLOCK SRWLock); WINBASEAPI BOOLEAN WINAPI TryAcquireSRWLockExclusive(PSRWLOCK SRWLock); WINBASEAPI BOOLEAN WINAPI TryAcquireSRWLockShared(PSRWLOCK SRWLock); WINBASEAPI HANDLE WINAPI CreateMutexA(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOnwer, 
LPCSTR lpName); WINBASEAPI HANDLE WINAPI CreateMutexW(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, LPCWSTR lpName); WINBASEAPI BOOL WINAPI ReleaseMutex(HANDLE hMutex); WINBASEAPI HANDLE WINAPI CreateEventA(LPSECURITY_ATTRIBUTES lpEventAttributes, BOOL bManualReset, BOOL bInitialState, LPCSTR lpName); WINBASEAPI HANDLE WINAPI CreateEventW(LPSECURITY_ATTRIBUTES lpEventAttributes, BOOL bManualReset, BOOL bInitialState, LPCWSTR lpName); WINBASEAPI void WINAPI Sleep(DWORD dwMilliseconds); WINBASEAPI DWORD WINAPI SleepEx(DWORD dwMilliseconds, BOOL bAlertable); // Note that these functions are not in DLLs; you must link with synchronization.lib BOOL WINAPI WaitOnAddress(volatile void* Address, PVOID CompareAddress, SIZE_T AddressSize, DWORD dwMilliseconds); void WINAPI WakeByAddressSingle(PVOID Address); void WINAPI WakeByAddressAll(PVOID Address); // from shellapi.h SHSTDAPI_(LPWSTR *) CommandLineToArgvW(LPCWSTR lpCmdLine, int* pNumArgs); // from processenv.h WINBASEAPI LPWSTR WINAPI GetCommandLineW(void); WINBASEAPI LPWCH WINAPI GetEnvironmentStringsW(void); WINBASEAPI BOOL WINAPI FreeEnvironmentStringsW(LPWCH penv); WINBASEAPI BOOL WINAPI SetEnvironmentVariableW(LPCWSTR lpName, LPCWSTR lpValue); WINBASEAPI DWORD WINAPI GetEnvironmentVariableW(LPCWSTR lpName, LPWSTR lpBuffer, DWORD nSize); // from handleapi.h WINBASEAPI BOOL WINAPI CloseHandle(HANDLE hObject); // from errhandlingapi.h WINBASEAPI void WINAPI RaiseException(DWORD, DWORD, DWORD, const ULONG_PTR*); WINBASEAPI DWORD WINAPI GetLastError(void); WINBASEAPI void WINAPI SetLastError(DWORD dwErrCode); // from processthreadsapi.h typedef struct _PROCESSOR_NUMBER PROCESSOR_NUMBER, *PPROCESSOR_NUMBER; WINBASEAPI HANDLE WINAPI GetCurrentProcess(void); WINBASEAPI DWORD WINAPI GetCurrentProcessId(void); WINBASEAPI HANDLE WINAPI GetCurrentThread(void); WINBASEAPI DWORD WINAPI GetCurrentThreadId(void); WINBASEAPI DWORD WINAPI GetThreadId(HANDLE); WINBASEAPI BOOL WINAPI TerminateProcess(HANDLE, UINT); 
WINBASEAPI DWORD WINAPI TlsAlloc(void); WINBASEAPI LPVOID WINAPI TlsGetValue(DWORD dwTlsIndex); WINBASEAPI BOOL WINAPI TlsSetValue(DWORD dwTlsIndex, LPVOID lpTlsValue); WINBASEAPI BOOL WINAPI TlsFree(DWORD dwTlsIndex); WINBASEAPI BOOL WINAPI GetProcessTimes(HANDLE hProcess, LPFILETIME lpCreationTime, LPFILETIME lpExitTime, LPFILETIME lpKernelTime, LPFILETIME lpUserTime); WINBASEAPI void WINAPI GetCurrentProcessorNumberEx(PPROCESSOR_NUMBER); // from porcesstoplogyapi.h typedef struct _GROUP_AFFINITY GROUP_AFFINITY, *PGROUP_AFFINITY; WINBASEAPI BOOL WINAPI GetThreadGroupAffinity(HANDLE hThread, PGROUP_AFFINITY GroupAffinity); WINBASEAPI BOOL WINAPI SetThreadGroupAffinity(HANDLE hThread, const GROUP_AFFINITY* GroupAffinity, PGROUP_AFFINITY PreviousGroupAffinity); // from sysinfoapi.h typedef struct _MEMORYSTATUSEX MEMORYSTATUSEX, *LPMEMORYSTATUSEX; typedef struct _SYSTEM_INFO SYSTEM_INFO, *LPSYSTEM_INFO; WINBASEAPI BOOL WINAPI GlobalMemoryStatusEx(LPMEMORYSTATUSEX lpBuffer); WINBASEAPI void WINAPI GetSystemInfo(LPSYSTEM_INFO lpSystemInfo); WINBASEAPI void WINAPI GetSystemTimeAsFileTime(LPFILETIME lpSystemTimeAsFileTime); WINBASEAPI void WINAPI GetSystemTimePreciseAsFileTime(LPFILETIME lpSystemTimeAsFileTime); WINBASEAPI DWORD WINAPI GetTickCount(void); WINBASEAPI ULONGLONG WINAPI GetTickCount64(void); // from timezoneapi.h WINBASEAPI BOOL WINAPI FileTimeToSystemTime(const FILETIME* lpFileTime, LPSYSTEMTIME lpSystemTime); // from libloaderapi.h WINBASEAPI FARPROC WINAPI GetProcAddress(HMODULE hModule, LPCSTR lpProcName); WINBASEAPI HMODULE WINAPI GetModuleHandleA(LPCSTR lpModuleName); WINBASEAPI HMODULE WINAPI GetModuleHandleW(LPCWSTR lpModuleName); #if defined(ISOLATION_AWARE_ENABLED) && ISOLATION_AWARE_ENABLED != 0 // LoadLibraryExW is #defined to be IsolationAwareLoadLibraryExW, which is an inline function in winbase.inl. 
That // function is not replicated here; if you need it, you need to either #include <Windows.h> or replicate the inline // function in your module or header file. #else WINBASEAPI HMODULE WINAPI LoadLibraryExW(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags); #endif WINBASEAPI BOOL WINAPI FreeLibrary(HMODULE hLibModule); WINBASEAPI BOOL WINAPI SetDefaultDllDirectories(DWORD DirectoryFlags); typedef PVOID DLL_DIRECTORY_COOKIE, *PDLL_DIRECTORY_COOKIE; WINBASEAPI BOOL WINAPI RemoveDllDirectory(DLL_DIRECTORY_COOKIE Cookie); #define CARBWIN_LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR 0x00000100 #define CARBWIN_LOAD_LIBRARY_SEARCH_DEFAULT_DIRS 0x00001000 WINBASEAPI BOOL WINAPI GetModuleHandleExW(DWORD dwFlags, LPCWSTR lpModuleName, HMODULE* phModule); #define CARBWIN_GET_MODULE_HANDLE_EX_FLAG_PIN (0x00000001) #define CARBWIN_GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT (0x00000002) #define CARBWIN_GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS (0x00000004) WINBASEAPI DWORD WINAPI GetModuleFileNameA(HMODULE hModule, LPSTR lpFilename, DWORD nSize); WINBASEAPI DWORD WINAPI GetModuleFileNameW(HMODULE hModule, LPWSTR lpFilename, DWORD nSize); // from memoryapi.h WINBASEAPI HANDLE WINAPI CreateFileMappingW(HANDLE hFile, LPSECURITY_ATTRIBUTES lpFileMappingAttributes, DWORD flProtect, DWORD dwMaximumSizeHigh, DWORD dwMaximumSizeLow, LPCWSTR lpName); WINBASEAPI HANDLE WINAPI OpenFileMappingW(DWORD dwDesiredAccess, BOOL bInheritHandle, LPCWSTR lpName); WINBASEAPI LPVOID WINAPI MapViewOfFile(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh, DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap); WINBASEAPI LPVOID WINAPI MapViewOfFileEx(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh, DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap, LPVOID lpBaseAddress); WINBASEAPI BOOL WINAPI UnmapViewOfFile(LPCVOID lpBaseAddress); WINBASEAPI LPVOID WINAPI VirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect); WINBASEAPI BOOL 
WINAPI VirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType); // from heapapi.h WINBASEAPI HANDLE WINAPI GetProcessHeap(void); // from fileapi.h WINBASEAPI BOOL WINAPI CreateDirectoryW(LPCWSTR lpPathName, LPSECURITY_ATTRIBUTES lpSecurityAttributes); WINBASEAPI DWORD WINAPI GetFullPathNameW(LPCWSTR lpFileName, DWORD nBufferLength, LPWSTR lpBuffer, LPWSTR* lpFilePart); WINBASEAPI DWORD WINAPI GetFinalPathNameByHandleW(HANDLE hFile, LPWSTR lpszFilePath, DWORD cchFilePath, DWORD dwFlags); WINBASEAPI HANDLE WINAPI CreateFileW(LPCWSTR lpFileName, DWORD dwDesiredAccess, DWORD dwShareMode, LPSECURITY_ATTRIBUTES lpSecurityAttributes, DWORD dwCreationDisposition, DWORD dwFlagsAndAttributes, HANDLE hTemplateFile); WINBASEAPI DWORD WINAPI GetFileAttributesW(LPCWSTR lpFileName); WINBASEAPI BOOL WINAPI LockFileEx(HANDLE hFile, DWORD dwFlags, DWORD dwReserved, DWORD nNumberOfBytesToLockLow, DWORD nNumberOfBytesToLockHigh, LPOVERLAPPED lpOverlapped); WINBASEAPI BOOL WINAPI UnlockFileEx(HANDLE hFile, DWORD dwReserved, DWORD nNumberOfBytesToUnlockLow, DWORD nNumberOfBytesToUnlockHigh, LPOVERLAPPED lpOverlapped); WINBASEAPI BOOL WINAPI DeleteFileA(LPCSTR lpFileName); WINBASEAPI BOOL WINAPI DeleteFileW(LPCWSTR lpFileName); WINBASEAPI BOOL WINAPI GetFileSizeEx(HANDLE hFile, PLARGE_INTEGER lpFileSize); WINBASEAPI BOOL WINAPI WriteFile(HANDLE hFile, LPCVOID lpBuffer, DWORD nNumberOfBytesToWrite, LPDWORD lpNumberOfBytesWritten, LPOVERLAPPED lpOverlapped); WINBASEAPI BOOL WINAPI FlushFileBuffers(HANDLE hFile); // from profileapi.h WINBASEAPI BOOL WINAPI QueryPerformanceCounter(LARGE_INTEGER* lpPerformanceCount); WINBASEAPI BOOL WINAPI QueryPerformanceFrequency(LARGE_INTEGER* lpFrequency); /////////////////////////////////////////////////////////////////////////////// // Header files below are not include with Windows.h // from psapi.h typedef struct _PROCESS_MEMORY_COUNTERS PROCESS_MEMORY_COUNTERS, *PPROCESS_MEMORY_COUNTERS; BOOL WINAPI K32GetProcessMemoryInfo(HANDLE hProcess, 
PPROCESS_MEMORY_COUNTERS ppsmemCounters, DWORD cb); // from pathcch.h WINBASEAPI HRESULT APIENTRY PathAllocCanonicalize(PCWSTR pszPathIn, ULONG dwFlags, PWSTR* ppszPathOut); // from WinNls.h WINBASEAPI int WINAPI LCMapStringW(LCID Locale, DWORD dwMapFlags, LPCWSTR lpSrcStr, int cchSrc, LPWSTR lpDestStr, int cchDest); // from userenv.h WINBASEAPI BOOL WINAPI GetUserProfileDirectoryW(HANDLE,LPWSTR,LPDWORD); #endif // NV_FORGE_WINDEF_H // Undef temporary defines #ifdef CARBWIN_WINBASEAPI_DEFINED #undef WINBASEAPI #undef CARBWIN_WINBASEAPI_DEFINED #endif #ifdef CARBWIN_WINAPI_DEFINED #undef WINAPI #undef CARBWIN_WINAPI_DEFINED #endif #ifdef CARBWIN_SHSTDAPI_DEFINED #undef SHSTDAPI #undef SHSTDAPI_ #undef CARBWIN_SHSTDAPI_DEFINED #endif #ifdef CARBWIN_APIENTRY_DEFINED #undef APIENTRY #undef CARBWIN_APIENTRY_DEFINED #endif #ifdef CARBWIN_WINADVAPI_DEFINED #undef WINADVAPI #undef CARBWIN_WINADVAPI_DEFINED #endif #ifdef __cplusplus } #endif #endif // clang-format on CARB_IGNOREWARNING_MSC_POP
omniverse-code/kit/include/carb/FrameworkBindingsPython.h
// Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindingsPythonUtils.h" #include "ClientUtils.h" #include "StartupUtils.h" #include "filesystem/IFileSystem.h" #include "logging/Logger.h" #include <memory> #include <string> #include <vector> namespace carb { namespace filesystem { struct File { }; } // namespace filesystem namespace detail { template <typename VT, typename T, size_t S> T getVectorValue(const VT& vector, size_t i) { if (i >= S) { throw py::index_error(); } const T* components = reinterpret_cast<const T*>(&vector); return components[i]; } template <typename VT, typename T, size_t S> void setVectorValue(VT& vector, size_t i, T value) { if (i >= S) { throw py::index_error(); } T* components = reinterpret_cast<T*>(&vector); components[i] = value; } template <typename VT, typename T, size_t S> py::list getVectorSlice(const VT& s, const py::slice& slice) { size_t start, stop, step, slicelength; if (!slice.compute(S, &start, &stop, &step, &slicelength)) throw py::error_already_set(); py::list returnList; for (size_t i = 0; i < slicelength; ++i) { returnList.append(getVectorValue<VT, T, S>(s, start)); start += step; } return returnList; } template <typename VT, typename T, size_t S> void setVectorSlice(VT& s, const py::slice& slice, const py::sequence& value) { size_t start, stop, step, slicelength; if (!slice.compute(S, &start, &stop, &step, &slicelength)) throw py::error_already_set(); if (slicelength != value.size()) throw std::runtime_error("Left and right hand size of slice assignment have different sizes!"); for (size_t i = 0; i < slicelength; 
++i) { setVectorValue<VT, T, S>(s, start, value[i].cast<T>()); start += step; } } template <typename TupleT, class T, size_t S> py::class_<TupleT> defineTupleCommon(py::module& m, const char* name, const char* docstring) { py::class_<TupleT> c(m, name, docstring); c.def(py::init<>()); // Python special methods for iterators, [], len(): c.def("__len__", [](const TupleT& t) { CARB_UNUSED(t); return S; }); c.def("__getitem__", [](const TupleT& t, size_t i) { return getVectorValue<TupleT, T, S>(t, i); }); c.def("__setitem__", [](TupleT& t, size_t i, T v) { setVectorValue<TupleT, T, S>(t, i, v); }); c.def("__getitem__", [](const TupleT& t, py::slice slice) -> py::list { return getVectorSlice<TupleT, T, S>(t, slice); }); c.def("__setitem__", [](TupleT& t, py::slice slice, const py::sequence& value) { setVectorSlice<TupleT, T, S>(t, slice, value); }); // That allows passing python sequence into C++ function which accepts concrete TupleT: py::implicitly_convertible<py::sequence, TupleT>(); return c; } template <typename TupleT, class T> py::class_<TupleT> defineTuple(py::module& m, const char* typeName, const char* varName0, T TupleT::*var0, const char* varName1, T TupleT::*var1, const char* docstring = "") { py::class_<TupleT> c = detail::defineTupleCommon<TupleT, T, 2>(m, typeName, docstring); // Ctors: c.def(py::init<T, T>()); c.def(py::init([](py::sequence s) -> TupleT { return { s[0].cast<T>(), s[1].cast<T>() }; })); // Properties: c.def_readwrite(varName0, var0); c.def_readwrite(varName1, var1); // Formatting: c.def("__str__", [var0, var1](const TupleT& t) { return fmt::format("({},{})", t.*var0, t.*var1); }); c.def("__repr__", [typeName, var0, var1](const TupleT& t) { return fmt::format("carb.{}({},{})", typeName, t.*var0, t.*var1); }); // Pickling: c.def(py::pickle( // __getstate__ [var0, var1](const TupleT& t) { return py::make_tuple(t.*var0, t.*var1); }, // __setstate__ [](py::tuple t) { return TupleT{ t[0].cast<T>(), t[1].cast<T>() }; })); return c; } template 
<typename TupleT, class T> py::class_<TupleT> defineTuple(py::module& m, const char* typeName, const char* varName0, T TupleT::*var0, const char* varName1, T TupleT::*var1, const char* varName2, T TupleT::*var2, const char* docstring = "") { py::class_<TupleT> c = detail::defineTupleCommon<TupleT, T, 3>(m, typeName, docstring); // Ctors: c.def(py::init<T, T, T>()); c.def(py::init([](py::sequence s) -> TupleT { return { s[0].cast<T>(), s[1].cast<T>(), s[2].cast<T>() }; })); // Properties: c.def_readwrite(varName0, var0); c.def_readwrite(varName1, var1); c.def_readwrite(varName2, var2); // Formatting: c.def("__str__", [var0, var1, var2](const TupleT& t) { return fmt::format("({},{},{})", t.*var0, t.*var1, t.*var2); }); c.def("__repr__", [typeName, var0, var1, var2](const TupleT& t) { return fmt::format("carb.{}({},{},{})", typeName, t.*var0, t.*var1, t.*var2); }); // Pickling: c.def(py::pickle( // __getstate__ [var0, var1, var2](const TupleT& t) { return py::make_tuple(t.*var0, t.*var1, t.*var2); }, // __setstate__ [](py::tuple t) { return TupleT{ t[0].cast<T>(), t[1].cast<T>(), t[2].cast<T>() }; })); return c; } template <typename TupleT, class T> py::class_<TupleT> defineTuple(py::module& m, const char* type, const char* varName0, T TupleT::*var0, const char* varName1, T TupleT::*var1, const char* varName2, T TupleT::*var2, const char* varName3, T TupleT::*var3, const char* docstring = "") { py::class_<TupleT> c = detail::defineTupleCommon<TupleT, T, 4>(m, type, docstring); // Ctors: c.def(py::init<T, T, T, T>()); c.def(py::init([](py::sequence s) -> TupleT { return { s[0].cast<T>(), s[1].cast<T>(), s[2].cast<T>(), s[3].cast<T>() }; })); // Properties: c.def_readwrite(varName0, var0); c.def_readwrite(varName1, var1); c.def_readwrite(varName2, var2); c.def_readwrite(varName3, var3); // Formatting: c.def("__str__", [var0, var1, var2, var3](const TupleT& t) { return fmt::format("({},{},{},{})", t.*var0, t.*var1, t.*var2, t.*var3); }); c.def("__repr__", [type, var0, 
var1, var2, var3](const TupleT& t) { return fmt::format("carb.{}({},{},{},{})", type, t.*var0, t.*var1, t.*var2, t.*var3); }); // Pickling: c.def(py::pickle( // __getstate__ [var0, var1, var2, var3](const TupleT& t) { return py::make_tuple(t.*var0, t.*var1, t.*var2, t.*var3); }, // __setstate__ [](py::tuple t) { return TupleT{ t[0].cast<T>(), t[1].cast<T>(), t[2].cast<T>(), t[3].cast<T>() }; })); return c; } static void log( const char* source, int32_t level, const char* fileName, const char* functionName, int lineNumber, const char* message) { if (g_carbLogFn && g_carbLogLevel <= level) { g_carbLogFn(source, level, fileName, functionName, lineNumber, "%s", message); } } } // namespace detail inline void definePythonModule(py::module& m) { //////// Common //////// using namespace carb::detail; defineTuple<Float2>(m, "Float2", "x", &Float2::x, "y", &Float2::y, R"( Pair of floating point values. These can be accessed via the named attributes, `x` & `y`, but also support sequence access, making them work where a list or tuple is expected. >>> f = carb.Float2(1.0, 2.0) >>> f[0] 1.0 >>> f.y 2.0 )"); defineTuple<Float3>( m, "Float3", "x", &Float3::x, "y", &Float3::y, "z", &Float3::z, R"(A triplet of floating point values. These can be accessed via the named attributes, `x`, `y` & `z`, but also support sequence access, making them work where a list or tuple is expected. >>> v = [1, 2, 3] f = carb.Float3(v) >>> f[0] 1.0 >>> f.y 2.0 >>> f[2] 3.0 )"); defineTuple<Float4>( m, "Float4", "x", &Float4::x, "y", &Float4::y, "z", &Float4::z, "w", &Float4::w, R"(A quadruplet of floating point values. These can be accessed via the named attributes, `x`, `y`, `z` & `w`, but also support sequence access, making them work where a list or tuple is expected. 
>>> v = [1, 2, 3, 4] f = carb.Float4(v) >>> f[0] 1.0 >>> f.y 2.0 >>> f[2] 3.0 >>> f.w 4.0 )"); defineTuple<Int2>(m, "Int2", "x", &Int2::x, "y", &Int2::y); defineTuple<Int3>(m, "Int3", "x", &Int3::x, "y", &Int3::y, "z", &Int3::z); defineTuple<Int4>(m, "Int4", "x", &Int4::x, "y", &Int4::y, "z", &Int4::z, "w", &Int4::w); defineTuple<Uint2>(m, "Uint2", "x", &Uint2::x, "y", &Uint2::y); defineTuple<Uint3>(m, "Uint3", "x", &Uint3::x, "y", &Uint3::y, "z", &Uint3::z); defineTuple<Uint4>(m, "Uint4", "x", &Uint4::x, "y", &Uint4::y, "z", &Uint4::z, "w", &Uint4::w); defineTuple<Double2>(m, "Double2", "x", &Double2::x, "y", &Double2::y); defineTuple<Double3>(m, "Double3", "x", &Double3::x, "y", &Double3::y, "z", &Double3::z); defineTuple<Double4>(m, "Double4", "x", &Double4::x, "y", &Double4::y, "z", &Double4::z, "w", &Double4::w); defineTuple<ColorRgb>(m, "ColorRgb", "r", &ColorRgb::r, "g", &ColorRgb::g, "b", &ColorRgb::b); defineTuple<ColorRgbDouble>( m, "ColorRgbDouble", "r", &ColorRgbDouble::r, "g", &ColorRgbDouble::g, "b", &ColorRgbDouble::b); defineTuple<ColorRgba>( m, "ColorRgba", "r", &ColorRgba::r, "g", &ColorRgba::g, "b", &ColorRgba::b, "a", &ColorRgba::a); defineTuple<ColorRgbaDouble>(m, "ColorRgbaDouble", "r", &ColorRgbaDouble::r, "g", &ColorRgbaDouble::g, "b", &ColorRgbaDouble::b, "a", &ColorRgbaDouble::a); //////// Python Utils //////// py::class_<Subscription, std::shared_ptr<Subscription>>(m, "Subscription", R"( Subscription holder. This object is returned by different subscription functions. Subscription lifetime is associated with this object. You can it while you need subscribed callback to be called. Then you can explicitly make it equal to `None` or call `unsubscribe` method or `del` it to unsubscribe. Quite common patter of usage is when you have a class which subscribes to various callbacks and you want to subscription to stay valid while class instance is alive. .. 
code-block:: python class Foo: def __init__(self): events = carb.events.get_events_interface() stream = events.create_event_stream() self._event_sub = stream.subscribe_to_pop(0, self._on_event) def _on_event(self, e): print(f'event {e}') >>> f = Foo() >>> # f receives some events >>> f._event_sub = None >>> f = None )") .def(py::init([](std::function<void()> unsubscribeFn) { return std::make_shared<Subscription>(wrapPythonCallback(std::move(unsubscribeFn))); })) .def("unsubscribe", &Subscription::unsubscribe); //////// ILogging //////// m.def("log", detail::log, py::arg("source"), py::arg("level"), py::arg("fileName"), py::arg("functionName"), py::arg("lineNumber"), py::arg("message")); py::module loggingModule = m.def_submodule("logging"); { py::enum_<logging::LogSettingBehavior>(loggingModule, "LogSettingBehavior") .value("INHERIT", logging::LogSettingBehavior::eInherit) .value("OVERRIDE", logging::LogSettingBehavior::eOverride); using LogFn = std::function<void(const char*, int32_t, const char*, int, const char*)>; struct PyLogger : public logging::Logger { LogFn logFn; }; static std::unordered_map<PyLogger*, std::shared_ptr<PyLogger>> s_loggers; py::class_<PyLogger>(loggingModule, "LoggerHandle"); defineInterfaceClass<logging::ILogging>(loggingModule, "ILogging", "acquire_logging") .def("set_level_threshold", wrapInterfaceFunction(&logging::ILogging::setLevelThreshold)) .def("get_level_threshold", wrapInterfaceFunction(&logging::ILogging::getLevelThreshold)) .def("set_log_enabled", wrapInterfaceFunction(&logging::ILogging::setLogEnabled)) .def("is_log_enabled", wrapInterfaceFunction(&logging::ILogging::isLogEnabled)) .def("set_level_threshold_for_source", wrapInterfaceFunction(&logging::ILogging::setLevelThresholdForSource)) .def("set_log_enabled_for_source", wrapInterfaceFunction(&logging::ILogging::setLogEnabledForSource)) .def("reset", wrapInterfaceFunction(&logging::ILogging::reset)) .def("add_logger", [](const logging::ILogging* ls, const LogFn& logFn) { 
auto logger = std::make_shared<PyLogger>(); logger->logFn = logFn; s_loggers[logger.get()] = logger; logger->handleMessage = [](logging::Logger* logger, const char* source, int32_t level, const char* filename, const char* functionName, int lineNumber, const char* message) { CARB_UNUSED(functionName); (static_cast<PyLogger*>(logger)->logFn)(source, level, filename, lineNumber, message); }; ls->addLogger(logger.get()); return logger.get(); }, py::return_value_policy::reference) .def("remove_logger", [](const logging::ILogging* ls, PyLogger* logger) { auto it = s_loggers.find(logger); if (it != s_loggers.end()) { ls->removeLogger(it->second.get()); s_loggers.erase(it); } else { CARB_LOG_ERROR("remove_logger: wrong Logger Handle"); } }); loggingModule.attr("LEVEL_VERBOSE") = py::int_(logging::kLevelVerbose); loggingModule.attr("LEVEL_INFO") = py::int_(logging::kLevelInfo); loggingModule.attr("LEVEL_WARN") = py::int_(logging::kLevelWarn); loggingModule.attr("LEVEL_ERROR") = py::int_(logging::kLevelError); loggingModule.attr("LEVEL_FATAL") = py::int_(logging::kLevelFatal); } //////// IFileSystem //////// py::module filesystemModule = m.def_submodule("filesystem"); { using namespace filesystem; py::class_<filesystem::File>(filesystemModule, "File"); py::enum_<DirectoryItemType>(filesystemModule, "DirectoryItemType") .value("FILE", DirectoryItemType::eFile) .value("DIRECTORY", DirectoryItemType::eDirectory); defineInterfaceClass<IFileSystem>(filesystemModule, "IFileSystem", "acquire_filesystem") .def("get_current_directory_path", wrapInterfaceFunction(&IFileSystem::getCurrentDirectoryPath)) .def("set_current_directory_path", wrapInterfaceFunction(&IFileSystem::setCurrentDirectoryPath)) .def("get_app_directory_path", wrapInterfaceFunction(&IFileSystem::getAppDirectoryPath)) .def("set_app_directory_path", wrapInterfaceFunction(&IFileSystem::setAppDirectoryPath)) .def("exists", wrapInterfaceFunction(&IFileSystem::exists)) .def("is_directory", 
wrapInterfaceFunction(&IFileSystem::isDirectory)) .def("open_file_to_read", wrapInterfaceFunction(&IFileSystem::openFileToRead), py::return_value_policy::reference) .def("open_file_to_write", wrapInterfaceFunction(&IFileSystem::openFileToWrite), py::return_value_policy::reference) .def("open_file_to_append", wrapInterfaceFunction(&IFileSystem::openFileToAppend), py::return_value_policy::reference) .def("close_file", wrapInterfaceFunction(&IFileSystem::closeFile)) .def("get_file_size", wrapInterfaceFunction(&IFileSystem::getFileSize)) //.def("get_file_mod_time", wrapInterfaceFunction(&IFileSystem::getFileModTime)) .def("get_mod_time", wrapInterfaceFunction(&IFileSystem::getModTime)) //.def("read_file_chunk", wrapInterfaceFunction(&IFileSystem::readFileChunk)) //.def("write_file_chunk", wrapInterfaceFunction(&IFileSystem::writeFileChunk)) //.def("read_file_line", wrapInterfaceFunction(&IFileSystem::readFileLine)) //.def("write_file_line", wrapInterfaceFunction(&IFileSystem::writeFileLine)) .def("flush_file", wrapInterfaceFunction(&IFileSystem::flushFile)) .def("make_temp_directory", [](IFileSystem* iface) -> py::object { char buffer[1024]; if (iface->makeTempDirectory(buffer, CARB_COUNTOF(buffer))) { return py::str(std::string(buffer)); } return py::none(); }) .def("make_directory", wrapInterfaceFunction(&IFileSystem::makeDirectory)) .def("remove_directory", wrapInterfaceFunction(&IFileSystem::removeDirectory)) .def("copy", wrapInterfaceFunction(&IFileSystem::copy)) //.def("for_each_directory_item", wrapInterfaceFunction(&IFileSystem::forEachDirectoryItem)) //.def("for_each_directory_item_recursive", wrapInterfaceFunction( //&IFileSystem::forEachDirectoryItemRecursive)) // .def("subscribe_to_change_events", wrapInterfaceFunction( //&IFileSystem::createChangeSubscription)) .def("unsubscribe_to_change_events", wrapInterfaceFunction( //&IFileSystem::destroyChangeSubscription)) ; } //////// Framework //////// py::enum_<PluginHotReload>(m, "PluginHotReload") 
.value("DISABLED", PluginHotReload::eDisabled) .value("ENABLED", PluginHotReload::eEnabled); py::class_<PluginImplDesc>(m, "PluginImplDesc") .def_readonly("name", &PluginImplDesc::name) .def_readonly("description", &PluginImplDesc::description) .def_readonly("author", &PluginImplDesc::author) .def_readonly("hotReload", &PluginImplDesc::hotReload) .def_readonly("build", &PluginImplDesc::build); py::class_<Version>(m, "Version") .def(py::init<>()) .def(py::init<uint32_t, uint32_t>()) .def_readonly("major", &Version::major) .def_readonly("minor", &Version::minor) .def("__repr__", [](const Version& v) { return fmt::format("v{}.{}", v.major, v.minor); }); py::class_<InterfaceDesc>(m, "InterfaceDesc") .def_readonly("name", &InterfaceDesc::name) .def_readonly("version", &InterfaceDesc::version) .def("__repr__", [](const InterfaceDesc& d) { return fmt::format("\"{} v{}.{}\"", d.name, d.version.major, d.version.minor); }); py::class_<PluginDesc>(m, "PluginDesc") .def_readonly("impl", &PluginDesc::impl) .def_property_readonly("interfaces", [](const PluginDesc& d) { return std::vector<InterfaceDesc>(d.interfaces, d.interfaces + d.interfaceCount); }) .def_property_readonly("dependencies", [](const PluginDesc& d) { return std::vector<InterfaceDesc>(d.dependencies, d.dependencies + d.dependencyCount); }) .def_readonly("libPath", &PluginDesc::libPath); m.def("get_framework", []() { return getFramework(); }, py::return_value_policy::reference); py::class_<Framework>(m, "Framework") .def("startup", [](const Framework* framework, std::vector<std::string> argv, const char* config, std::vector<std::string> initialPluginsSearchPaths, const char* configFormat) { CARB_UNUSED(framework); std::vector<char*> argv_(argv.size()); for (size_t i = 0; i < argv.size(); i++) { argv_[i] = (char*)argv[i].c_str(); } std::vector<char*> initialPluginsSearchPaths_(initialPluginsSearchPaths.size()); for (size_t i = 0; i < initialPluginsSearchPaths.size(); i++) { initialPluginsSearchPaths_[i] = 
(char*)initialPluginsSearchPaths[i].c_str(); } carb::StartupFrameworkDesc startupParams = carb::StartupFrameworkDesc::getDefault(); startupParams.configString = config; startupParams.argv = argv_.size() ? argv_.data() : nullptr; startupParams.argc = static_cast<int>(argv_.size()); startupParams.initialPluginsSearchPaths = initialPluginsSearchPaths_.size() ? initialPluginsSearchPaths_.data() : nullptr; startupParams.initialPluginsSearchPathCount = initialPluginsSearchPaths_.size(); startupParams.configFormat = configFormat; startupFramework(startupParams); }, py::arg("argv") = std::vector<std::string>(), py::arg("config") = nullptr, py::arg("initial_plugins_search_paths") = std::vector<std::string>(), py::arg("config_format") = "toml") .def("load_plugins", [](const Framework* framework, std::vector<std::string> loadedFileWildcards, std::vector<std::string> searchPaths) { std::vector<const char*> loadedFileWildcards_(loadedFileWildcards.size()); for (size_t i = 0; i < loadedFileWildcards.size(); i++) { loadedFileWildcards_[i] = loadedFileWildcards[i].c_str(); } std::vector<const char*> searchPaths_(searchPaths.size()); for (size_t i = 0; i < searchPaths.size(); i++) { searchPaths_[i] = searchPaths[i].c_str(); } carb::PluginLoadingDesc desc = carb::PluginLoadingDesc::getDefault(); desc.loadedFileWildcardCount = loadedFileWildcards_.size(); desc.loadedFileWildcards = loadedFileWildcards_.data(); if (searchPaths_.size() > 0) { desc.searchPathCount = searchPaths_.size(); desc.searchPaths = searchPaths_.data(); } framework->loadPluginsEx(desc); }, py::arg("loaded_file_wildcards") = std::vector<std::string>(), py::arg("search_paths") = std::vector<std::string>()) .def("unload_all_plugins", wrapInterfaceFunction(&Framework::unloadAllPlugins)) .def("get_plugins", [](const Framework* framework) { std::vector<PluginDesc> plugins(framework->getPluginCount()); framework->getPlugins(plugins.data()); return plugins; }) .def("try_reload_plugins", 
wrapInterfaceFunction(&Framework::tryReloadPlugins)); py::options options; // options.disable_function_signatures(); m.def("answer_question", [](const char* message) { CARB_UNUSED(message); return std::string("blarg"); }, py::arg("question"), R"( This function can answer some questions. It currently only answers a limited set of questions so don't expect it to know everything. Args: question: The question passed to the function, trailing question mark is not necessary and casing is not important. Returns: The answer to the question or empty string if it doesn't know the answer.)"); } } // namespace carb
omniverse-code/kit/include/carb/extras/Base64.h
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

/** @file
 *  @brief Provides a Base64 encoding and decoding helper class.
 */
#pragma once

#include <stdint.h>
#include <string.h>


/** Namespace for all low level Carbonite functionality. */
namespace carb
{
/** Common namespace for extra helper functions and classes. */
namespace extras
{

/** Encoder and decoder helper for Base64 data.  This allows for processing Base64 data in multiple
 *  different standard and non-standard variants of the algorithm, plus custom encodings.  Padding
 *  bytes are always optional but will be generated by default by all non-custom variants.  A
 *  custom variant without padding bytes can be specified by setting the padding byte to 0.  No
 *  data verification is done on decoded data - that is left up to the caller.  Any invalid input
 *  data is simply ignored.
 */
class Base64
{
public:
    /** Special value for the @a size parameters to encode() and decode() to indicate that the
     *  input data buffer is a null terminated string.  This is only safe on encode() if the
     *  input to be encoded is known to be a standard C string.  If binary data is provided
     *  to be encoded, an explicit size must always be given.  On decode(), this value should
     *  only be used if the encoded data being passed as input is known to be null terminated.
     *  Encoded data produced by this object will always be null terminated.
     */
    static constexpr size_t kNullTerminated = ~0ull;

    /** Various variants of the Base64 algorithm.  The only differences these cause in the
     *  generated encoded data are in the bytes used for the values 62 and 63, as well as
     *  the padding byte.  All other encoded values will always use A-Z, a-z, and 0-9.  The
     *  correct variant must also be known ahead of time when decoding data.  No auto-detection
     *  on the input data will be done.  For most applications, the @ref Variant::eDefault
     *  and @ref Variant::eFilenameSafe variants should be the most useful.
     */
    enum class Variant
    {
        eDefault, ///< Default encoding set.
        ePem, ///< Encoding set for privacy-enhanced mail (RFC 1421, deprecated).
        eMime, ///< Encoding set for standard MIME Base64 (RFC 2045).
        eRfc4648, ///< Encoding set for RFC 4648.
        eFilenameSafe, ///< Encoding set for URL and filename safe Base64 (RFC 4648, section 5).
        eOpenPgp, ///< Encoding set for OpenPGP (RFC 4880).
        eUtf7, ///< Encoding set for UTF-7 (RFC 2152).
        eImap, ///< Encoding set for IMAP mailbox names (RFC 3501).
        eYui, ///< Encoding set for Y64 URL-safe Base64 from the YUI library.
        eProgramId1, ///< Encoding set for Program identifier Base64 variant 1 (non-standard).
        eProgramId2, ///< Encoding set for Program identifier Base64 variant 2 (non-standard).
        eFreenetUrl, ///< Encoding set for Freenet URL-safe Base64 (non-standard).
    };

    /** Constructor: creates a new object supporting the default Base64 encoding. */
    Base64() : Base64(Variant::eDefault)
    {
    }

    /** Constructor: creates a new object supporting a specific known Base64 encoding.
     *
     *  @param[in] variant  The algorithm variant to use.  This controls which additional two
     *                      encoding bytes and which padding byte will be used.  The variant
     *                      specifics about line lengths, checksums, and optional versus
     *                      mandatory padding will be ignored.
     */
    Base64(Variant variant)
    {
        switch (variant)
        {
            default:
            case Variant::eDefault:
            case Variant::ePem:
            case Variant::eMime:
            case Variant::eRfc4648:
            case Variant::eOpenPgp:
            case Variant::eUtf7:
                initCodec('+', '/', '=');
                break;

            case Variant::eFilenameSafe:
                initCodec('-', '_', '=');
                break;

            case Variant::eImap:
                initCodec('+', ',', '=');
                break;

            case Variant::eYui:
                initCodec('.', '_', '-');
                break;

            case Variant::eProgramId1:
                initCodec('_', '-', '=');
                break;

            case Variant::eProgramId2:
                initCodec('.', '_', '=');
                break;

            case Variant::eFreenetUrl:
                initCodec('~', '-', '=');
                break;
        }
    }

    /** Constructor: creates a new object supporting a custom Base64 encoding.
     *
     *  @param[in] byte62   The byte that will be used to represent the encoding for the value
     *                      62 (0x3e).  This may be any non-zero byte outside of the ranges
     *                      [A-Z, a-z, 0-9].  This may not be equal to @p byte63 or @p padding.
     *  @param[in] byte63   The byte that will be used to represent the encoding for the value
     *                      63 (0x3f).  This may be any non-zero byte outside of the ranges
     *                      [A-Z, a-z, 0-9].  This may not be equal to @p byte62 or @p padding.
     *  @param[in] padding  The padding byte to use at the end of an encoded block to identify
     *                      an unaligned block.  This may be 0 to indicate that no specific
     *                      padding byte is used.  This may not be equal to @p byte62 or
     *                      @p byte63 and must not be in the range [A-Z, a-z, 0-9].  The usual
     *                      padding byte for this value in most variants is '='.  This defaults
     *                      to 0.
     */
    Base64(uint8_t byte62, uint8_t byte63, uint8_t padding = 0)
    {
        initCodec(byte62, byte63, padding);
    }

    /** Calculates the required output size for encoding a given number of bytes.
     *
     *  @param[in] inputSize    The size of the input buffer in bytes.  This may not be 0.
     *  @returns The number of bytes required to store the encoded data for the given input
     *           size.  This will always include space for a null terminator byte so that the
     *           encoded data can always be treated as a C string for further processing.
     *           The actual size of the encoded data should always come from the value returned
     *           by encode().
     */
    static size_t getEncodeOutputSize(size_t inputSize)
    {
        return 4 * ((inputSize + 2) / 3) + 1;
    }

    /** Calculates the required input buffer size for a given output buffer length.
     *  @param[in] outputSize   The size of the output to write.
     *  @returns The number of bytes that needs to be input to produce a given
     *           output size without padding.
     *           This is useful if you want to split up a base64 encode across
     *           multiple buffers.  Breaking up the input into a series of chunks
     *           of this size will allow the output base64 chunks to be
     *           concatenated together without creating an invalid base64 string.
     */
    static size_t getEncodeInputSize(size_t outputSize)
    {
        return ((outputSize - 1) / 4 * 3) / 3 * 3;
    }

    /** Calculates the required output size for decoding a given number of bytes.
     *
     *  @param[in] inputSize    The size of the input buffer in bytes.  This may not be 0.
     *  @returns The number of bytes required to store the decoded data for the given input
     *           size.  This may include some extra space depending on whether the input
     *           buffer is null terminated or not.  The actual size of the decoded data
     *           should always come from the value returned by decode().
     */
    static size_t getDecodeOutputSize(size_t inputSize)
    {
        return 3 * ((inputSize + 3) / 4);
    }

    /** Encodes a block of binary data into Base64.
     *
     *  @param[in]  buffer  The input buffer to be encoded.  This may not be `nullptr`.
     *  @param[in]  size    The size of the input buffer in bytes.  This may not be `0`.  This may
     *                      be @ref kNullTerminated to indicate that the input data is a null
     *                      terminated C string.  The actual length of the input will be
     *                      calculated by finding the length of the string.
     *  @param[out] output  Receives the encoded output as long as it is large enough to contain
     *                      the entire encoded output.  No work will be done if the output buffer
     *                      is not large enough.  Encoding into the same buffer as @p buffer is
     *                      not safe since the input data will be overwritten as it is processed
     *                      resulting in a corrupted encoding.  This output buffer will always
     *                      be null terminated so that the output data can be processed as a
     *                      standard C string.
     *  @param[in]  maxOut  The size of the output buffer in bytes.  This may be larger than is
     *                      strictly required by the encoding operation.  This must be large
     *                      enough to hold the entire encoded result.
     *  @returns The number of bytes of encoded data written to the output buffer, not including
     *           the null terminator.  Note that this byte count may not be aligned to a multiple
     *           of 4 if padding is ignored (ie: this object was initialized to use a padding byte
     *           of 0).
     *  @returns `0` if the output buffer is not large enough to hold the full encoded output.
     *
     *  @note  If the encoded data block is expected to be transmitted to a generic destination
     *         (ie: one not controlled by this process or something related to it), the caller
     *         is responsible for ensuring that the data can be properly interpreted by the
     *         receiver regardless of endianness.  Since base64 encoding simply sees the block
     *         of data as a simple string of bytes, it has no knowledge of any internal structure
     *         and can therefore not properly do any kind of network byte order swapping.  The
     *         caller would be the one with the knowledge of internal structure and should
     *         byte swap the incoming data as needed before attempting to encode it.
     */
    size_t encode(const void* buffer, size_t size, char* output, size_t maxOut)
    {
        uint32_t data;
        size_t j = 0;
        size_t stop;
        size_t extra;
        size_t paddingCount = (m_padding == 0) ? 0 : 1;

        // null terminated C string input data => calculate its length.
        if (size == kNullTerminated)
            size = strlen(reinterpret_cast<const char*>(buffer));

        // the output buffer is not large enough => fail.
        if (maxOut < getEncodeOutputSize(size))
            return 0;

        // calculate the number of input bytes that can be bulk processed.
        stop = (size / 3) * 3;
        extra = size - stop;

        // bulk process all aligned input bytes.  Each three byte input block will produce
        // four output bytes.
        for (size_t i = 0; i < stop; i += 3)
        {
            data = (reinterpret_cast<const uint8_t*>(buffer)[i + 0] << 16) |
                   (reinterpret_cast<const uint8_t*>(buffer)[i + 1] << 8) |
                   (reinterpret_cast<const uint8_t*>(buffer)[i + 2] << 0);
            output[j + 0] = m_encode[(data >> 18) & 0x3f];
            output[j + 1] = m_encode[(data >> 12) & 0x3f];
            output[j + 2] = m_encode[(data >> 6) & 0x3f];
            output[j + 3] = m_encode[(data >> 0) & 0x3f];
            j += 4;
        }

        // process any remaining bytes.  Note that a value of 0 indicates that the original
        // input data was a multiple of 3 bytes and no unaligned data needs to be processed.
        switch (extra)
        {
            // one extra unaligned input byte was provided.  This will produce two output bytes
            // followed by two padding bytes.
            case 1:
                data = reinterpret_cast<const uint8_t*>(buffer)[stop + 0] << 16;
                output[j + 0] = m_encode[(data >> 18) & 0x3f];
                output[j + 1] = m_encode[(data >> 12) & 0x3f];
                output[j + 2] = m_padding;
                output[j + 3] = m_padding;
                j += 2 + (paddingCount * 2);
                break;

            // two extra unaligned input bytes were provided.  This will produce three output
            // bytes followed by one padding byte.
            case 2:
                data = (reinterpret_cast<const uint8_t*>(buffer)[stop + 0] << 16) |
                       (reinterpret_cast<const uint8_t*>(buffer)[stop + 1] << 8);
                output[j + 0] = m_encode[(data >> 18) & 0x3f];
                output[j + 1] = m_encode[(data >> 12) & 0x3f];
                output[j + 2] = m_encode[(data >> 6) & 0x3f];
                output[j + 3] = m_padding;
                j += 3 + paddingCount;
                break;

            // no extra bytes -> the input was block aligned => nothing to do.  This should
            // never occur for any value other than 0 => ignore it.
            default:
                break;
        }

        // always null terminate the output buffer so that it can be treated as a C string by
        // the caller.
        output[j] = 0;
        return j;
    }

    /** Decodes a block of binary data from Base64.
     *
     *  @param[in]  buffer  The input buffer to be decoded.  This may not be `nullptr`.  This
     *                      buffer may be optionally null terminated.
     *  @param[in]  size    The size of the input buffer in bytes.  This may not be `0`.  This may
     *                      be @ref kNullTerminated to indicate that the input buffer is a null
     *                      terminated C string.  In this case, the length of the input buffer
     *                      will be calculated as the length of the string.
     *  @param[out] output  Receives the decoded data as long as it is large enough to contain
     *                      the entire decoded output.  No work will be done if the output buffer
     *                      is not large enough.  Decoding into the same buffer as @p buffer is
     *                      safe since the input data will be always be longer than the output.
     *                      No null terminator or extra data will ever be written to the decoded
     *                      data buffer.
     *  @param[in]  maxOut  The size of the output buffer in bytes.  This may be larger than is
     *                      strictly required by the decoding operation.  This must be large
     *                      enough to hold the entire decoded result.
     *  @returns The number of bytes of decoded data written to the output buffer.  Note that
     *           this byte count will not have a particular alignment and will always match
     *           that of the original encoded data exactly.
     *  @returns `0` if the output buffer is not large enough to hold the full decoded output.
     *
     *  @note  This will decode the block exactly as it was encoded and assuming a little endian
     *         byte ordering.  Since base64 always treats the data block as a simple string of
     *         bytes, the caller is responsible for doing any kind of endianness checks and
     *         byte swapping as necessary after decoding.
     */
    size_t decode(const char* buffer, size_t size, void* output, size_t maxOut)
    {
        uint32_t data;
        size_t j = 0;
        size_t stop;
        size_t extra;
        uint8_t* out = reinterpret_cast<uint8_t*>(output);

        // the input buffer is a null terminated C string => calculate its length.
        if (size == kNullTerminated)
            size = strlen(reinterpret_cast<const char*>(buffer));

        // the output buffer is not large enough => fail.
        if (maxOut < getDecodeOutputSize(size))
            return 0;

        // invalid encoding length -> decodes to less than 1 byte => fail.
        if (size < 2)
            return 0;

        // calculate the number of input bytes that can be bulk processed.
        extra = size & 3;
        stop = size - extra;

        // no extra unaligned input bytes were provided -> the input buffer is actually aligned
        // or it contains padding bytes => determine which one is correct and adjust sizes.
        if (extra == 0)
        {
            // two padding bytes were specified => produces one byte in the last block.
            if (buffer[size - 2] == m_padding)
            {
                extra = 2;
                stop -= 4;
            }

            // one padding byte was specified => produces two bytes in the last block.
            else if (buffer[size - 1] == m_padding)
            {
                extra = 3;
                stop -= 4;
            }

            // at this point, we know the input is either block aligned or is corrupt.  Either
            // way we don't care and will just continue decoding.  It is left up to the caller
            // to verify the validity of the decoded data.  All we are interested in here is
            // the actual decoding process.
        }

        // bulk process the aligned input data.  Each four byte block will produce three output
        // bytes.  Note that each input byte is converted through uint8_t before being used as
        // a table index.  On platforms where `char` is signed, casting a byte >= 0x80 directly
        // to size_t would sign extend to a huge index and read far outside m_decode[].
        for (size_t i = 0; i < stop; i += 4)
        {
            data = (m_decode[static_cast<uint8_t>(buffer[i + 0])] << 18) |
                   (m_decode[static_cast<uint8_t>(buffer[i + 1])] << 12) |
                   (m_decode[static_cast<uint8_t>(buffer[i + 2])] << 6) |
                   (m_decode[static_cast<uint8_t>(buffer[i + 3])] << 0);
            out[j + 0] = (data >> 16) & 0xff;
            out[j + 1] = (data >> 8) & 0xff;
            out[j + 2] = (data >> 0) & 0xff;
            j += 3;
        }

        // process any extra unaligned bytes.  This will allow extra or optional padding bytes to
        // be ignored in the input buffer.
        switch (extra)
        {
            // two extra bytes (plus two optional padding bytes) were provided.  This produces one
            // output byte.
            case 2:
                data = (m_decode[static_cast<uint8_t>(buffer[stop + 0])] << 18) |
                       (m_decode[static_cast<uint8_t>(buffer[stop + 1])] << 12);
                out[j + 0] = (data >> 16) & 0xff;
                j += 1;
                break;

            // three extra bytes (plus one optional padding byte) were provided.  This produces two
            // output bytes.
            case 3:
                data = (m_decode[static_cast<uint8_t>(buffer[stop + 0])] << 18) |
                       (m_decode[static_cast<uint8_t>(buffer[stop + 1])] << 12) |
                       (m_decode[static_cast<uint8_t>(buffer[stop + 2])] << 6);
                out[j + 0] = (data >> 16) & 0xff;
                out[j + 1] = (data >> 8) & 0xff;
                j += 2;
                break;

            // invalid 'extra' count (!?) or no extra bytes => fail.
            default:
                break;
        }

        // return the number of decoded bytes.
        return j;
    }

private:
    /** Initializes the encoding and decoding tables for this object.
     *
     *  @param[in] char62   The byte that will be used to represent the encoding for the value
     *                      62 (0x3e).  This may be any non-zero byte outside of the ranges
     *                      [A-Z, a-z, 0-9].  This may not be equal to @p char63 or @p padding.
     *  @param[in] char63   The byte that will be used to represent the encoding for the value
     *                      63 (0x3f).  This may be any non-zero byte outside of the ranges
     *                      [A-Z, a-z, 0-9].  This may not be equal to @p char62 or @p padding.
     *  @param[in] padding  The padding byte to use at the end of an encoded block to identify
     *                      an unaligned block.  This may be 0 to indicate that no specific
     *                      padding byte is used.  This may not be equal to @p char62 or
     *                      @p char63 and must not be in the range [A-Z, a-z, 0-9].  The usual
     *                      padding byte for this value in most variants is '='.
     *  @returns No return value.
     */
    void initCodec(uint8_t char62, uint8_t char63, uint8_t padding)
    {
        // generate the encoding map.
        for (size_t i = 0; i < 26; i++)
        {
            m_encode[i] = ('A' + i) & 0xff;
            m_encode[i + 26] = ('a' + i) & 0xff;
        }

        for (size_t i = 0; i < 10; i++)
            m_encode[i + 52] = ('0' + i) & 0xff;

        m_encode[62] = char62;
        m_encode[63] = char63;

        // generate the decoding map as the reverse of the encoding table.  The index is
        // converted through uint8_t so that custom codec bytes >= 0x80 cannot sign extend
        // into an out-of-bounds write on platforms where `char` is signed.
        memset(m_decode, 0, sizeof(m_decode));

        for (size_t i = 0; i < 64; i++)
            m_decode[static_cast<uint8_t>(m_encode[i])] = i & 0xff;

        // store the padding byte.  Note that the padding byte does not need to be part of the
        // decoding table since it will never be decoded.
        m_padding = padding;
    }

    /** The encoding table for this object.  This specifies all possible encoding values for each
     *  of the 64 input data bytes.
     */
    char m_encode[64];

    /** The decoding table for this object.  This specifies the decoded bit patterns for all
     *  possible encoded input bytes.  This table will contain a zero for all invalid input
     *  bytes.
     */
    char m_decode[256];

    /** The padding byte used by this object.  This will be 0 to indicate that no padding
     *  bytes will be generated (on encode) or expected (on decode).
     */
    char m_padding;
};

} // namespace extras
} // namespace carb
omniverse-code/kit/include/carb/extras/MultiFreeListAllocator.h
// Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "../Defines.h" #include "../logging/Log.h" #include "FreeListAllocator.h" #include <cstring> namespace carb { namespace extras { /** * Defines a free list that can allocate/deallocate fast and in any order from identically sized blocks. * * As long as every deallocation matches every allocation. Both allocation and deallocation are O(1) and * generally just a few instructions. The underlying memory allocator will allocate in large blocks, * with multiple elements amortizing a more costly large allocation against lots of fast small element allocations. */ class MultiFreeListAllocator { public: struct AllocDesc { size_t elementSize; size_t elementsPerBlock; }; /** * Constructor. */ MultiFreeListAllocator() { } /** * Destructor. */ ~MultiFreeListAllocator() { shutdown(); } void startup(AllocDesc* allocatorDescs, size_t allocatorCount) { // First, count the free list allocator descriptors m_freeListAllocDescs = new FreeListAllocInternalDesc[allocatorCount]; m_freeListAllocCount = static_cast<uint32_t>(allocatorCount); // Setup allocators for (uint32_t i = 0; i < m_freeListAllocCount; ++i) { size_t elemSize = CARB_ALIGNED_SIZE(allocatorDescs[i].elementSize, FreeListAllocator::kMinimalAlignment); size_t elemPerBlock = (allocatorDescs[i].elementsPerBlock == kElementsPerBlockAuto) ? 
kDefaultElementsPerBlock : allocatorDescs[i].elementsPerBlock; m_freeListAllocDescs[i].pureElemSize = elemSize; m_freeListAllocDescs[i].allocator.initialize(elemSize, 0, elemPerBlock); } } void shutdown() { delete[] m_freeListAllocDescs; m_freeListAllocDescs = nullptr; m_freeListAllocCount = 0; } void* allocate(size_t size) { uint8_t* originalChunk = nullptr; // Calculate total prefix size size_t prefixSize = sizeof(ChunkSizePrefixType) + sizeof(ChunkOffsetPrefixType); // Chunk size includes prefixes and the requested chunk size size_t originalChunkSize = prefixSize + size; // Get suitable allocator index based on the total chunk size uint32_t allocatorIndex = _getAllocatorIndexFromSize(originalChunkSize); if (allocatorIndex != kNoSuitableAllocator) { originalChunk = static_cast<uint8_t*>(m_freeListAllocDescs[allocatorIndex].allocator.allocate()); } else { originalChunk = static_cast<uint8_t*>(CARB_MALLOC(originalChunkSize)); } if (originalChunk == nullptr) { CARB_LOG_ERROR("Failed to allocate memory!"); return nullptr; } size_t ptrOffset = prefixSize; // Calculate final chunk address, so that prefix could be inserted uint8_t* extChunk = ptrOffset + originalChunk; // Record chunk size requested from the underlying allocator ChunkSizePrefixType* chunkSizeMem = reinterpret_cast<ChunkSizePrefixType*>(extChunk) - 1; *chunkSizeMem = static_cast<ChunkSizePrefixType>(originalChunkSize); // Record the offset from the returned memory pointer to the allocated memory chunk ChunkOffsetPrefixType* chunkOffsetMem = reinterpret_cast<ChunkOffsetPrefixType*>(chunkSizeMem) - 1; *chunkOffsetMem = static_cast<ChunkOffsetPrefixType>(ptrOffset); return extChunk; } void* allocateAligned(size_t size, size_t alignment) { // With zero alignment, call regular allocation if (alignment == 0) return allocate(size); uint8_t* originalChunk = nullptr; // Calculate aligned memory slot size for prefixes size_t prefixSize = sizeof(ChunkSizePrefixType) + sizeof(ChunkOffsetPrefixType); size_t 
prefixSizeAligned = CARB_ALIGNED_SIZE(prefixSize, static_cast<uint32_t>(alignment)); // Conservative chunk size includes prefixes, requested chunk size and alignment reserve size_t originalChunkAlignedSize = prefixSizeAligned + size + (alignment - 1); // Get suitable allocator index based on the total chunk size uint32_t allocatorIndex = _getAllocatorIndexFromSize(originalChunkAlignedSize); if (allocatorIndex != kNoSuitableAllocator) { originalChunk = static_cast<uint8_t*>(m_freeListAllocDescs[allocatorIndex].allocator.allocate()); } else { originalChunk = static_cast<uint8_t*>(CARB_MALLOC(originalChunkAlignedSize)); } if (originalChunk == nullptr) { CARB_LOG_ERROR("Failed to allocate memory!"); return nullptr; } // Calculate final chunk address, so that prefix could be inserted uint8_t* alignedPrefixedChunk = CARB_ALIGN(originalChunk + prefixSize, alignment); size_t ptrOffset = alignedPrefixedChunk - originalChunk; // Make sure that we don't go out-of-bounds CARB_CHECK(ptrOffset + size <= originalChunkAlignedSize); // This is effectively equal to alignedPrefixedChunk uint8_t* extChunk = ptrOffset + originalChunk; // Record chunk size requested from the underlying allocator ChunkSizePrefixType* chunkSizeMem = reinterpret_cast<ChunkSizePrefixType*>(extChunk) - 1; *chunkSizeMem = static_cast<ChunkSizePrefixType>(originalChunkAlignedSize); // Record the offset from the returned memory pointer to the allocated memory chunk ChunkOffsetPrefixType* chunkOffsetMem = reinterpret_cast<ChunkOffsetPrefixType*>(chunkSizeMem) - 1; *chunkOffsetMem = static_cast<ChunkOffsetPrefixType>(ptrOffset); return extChunk; } void deallocate(void* memory) { if (!memory) return; uint8_t* extChunk = static_cast<uint8_t*>(memory); ChunkSizePrefixType* chunkSizeMem = reinterpret_cast<ChunkSizePrefixType*>(extChunk) - 1; size_t originalChunkSize = static_cast<size_t>(*chunkSizeMem); ChunkOffsetPrefixType* chunkOffsetMem = reinterpret_cast<ChunkOffsetPrefixType*>(chunkSizeMem) - 1; size_t offset = 
static_cast<size_t>(*chunkOffsetMem); uint8_t* originalChunk = reinterpret_cast<uint8_t*>(extChunk) - offset; uint32_t allocatorIndex = _getAllocatorIndexFromSize(originalChunkSize); if (allocatorIndex != kNoSuitableAllocator) { m_freeListAllocDescs[allocatorIndex].allocator.deallocate(originalChunk); } else { CARB_FREE(originalChunk); } } private: struct FreeListAllocInternalDesc { FreeListAllocator allocator; size_t pureElemSize; }; FreeListAllocInternalDesc* m_freeListAllocDescs = nullptr; uint32_t m_freeListAllocCount = 0; static const size_t kUnlimitedSize = 0; static const size_t kDefaultElementsPerBlock = 100; static const size_t kElementsPerBlockAuto = 0; using ChunkSizePrefixType = uint32_t; using ChunkOffsetPrefixType = uint32_t; const uint32_t kNoSuitableAllocator = (uint32_t)-1; uint32_t _getAllocatorIndexFromSize(size_t size) { for (uint32_t i = 0; i < m_freeListAllocCount; ++i) { if (m_freeListAllocDescs[i].pureElemSize >= size) { return i; } } return kNoSuitableAllocator; } static constexpr size_t _calculateAlignedSize(size_t size, size_t alignment) { return CARB_ALIGNED_SIZE(size, static_cast<uint32_t>(alignment)); } }; } // namespace extras } // namespace carb