file_path
stringlengths 21
202
| content
stringlengths 12
1.02M
| size
int64 12
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 3.33
100
| max_line_length
int64 10
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/bindings/CesiumOmniversePythonBindings.pyi | from typing import Any, List, Tuple
from typing import overload
class Asset:
    """Read-only record describing a single Cesium ion asset."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def asset_id(self) -> int: ...
    @property
    def asset_type(self) -> str: ...
    @property
    def attribution(self) -> str: ...
    @property
    def bytes(self) -> int: ...
    @property
    def date_added(self) -> str: ...
    @property
    def description(self) -> str: ...
    @property
    def name(self) -> str: ...
    @property
    def percent_complete(self) -> int: ...
    @property
    def status(self) -> str: ...
class AssetTroubleshootingDetails:
    """Diagnostic flags used when troubleshooting access to a specific ion asset."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def asset_exists_in_user_account(self) -> bool: ...
    @property
    def asset_id(self) -> int: ...
class Assets:
    """Container pairing a collection of assets (``items``) with a related ``link`` string."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def items(self) -> Any: ...
    @property
    def link(self) -> str: ...
class CesiumIonSession:
    """A Cesium ion sign-in session: connection state plus lazily refreshed
    profile, asset list, and token list."""

    def __init__(self, *args, **kwargs) -> None: ...
    def disconnect(self) -> None: ...
    def get_assets(self, *args, **kwargs) -> Any: ...
    def get_authorize_url(self) -> str: ...
    def get_connection(self, *args, **kwargs) -> Any: ...
    def get_profile(self, *args, **kwargs) -> Any: ...
    def get_tokens(self, *args, **kwargs) -> Any: ...
    def is_asset_list_loaded(self) -> bool: ...
    def is_connected(self) -> bool: ...
    def is_connecting(self) -> bool: ...
    def is_loading_asset_list(self) -> bool: ...
    def is_loading_profile(self) -> bool: ...
    def is_loading_token_list(self) -> bool: ...
    def is_profile_loaded(self) -> bool: ...
    def is_resuming(self) -> bool: ...
    def is_token_list_loaded(self) -> bool: ...
    def refresh_assets(self) -> None: ...
    def refresh_profile(self) -> None: ...
    def refresh_tokens(self) -> None: ...
class Connection:
    """Access token and API URI for an established Cesium ion connection."""

    def __init__(self, *args, **kwargs) -> None: ...
    def get_access_token(self) -> str: ...
    def get_api_uri(self) -> str: ...
class ICesiumOmniverseInterface:
    """Primary interface to the native Cesium Omniverse plugin.

    Obtain via :func:`acquire_cesium_omniverse_interface` and release via
    :func:`release_cesium_omniverse_interface`.
    """

    def __init__(self, *args, **kwargs) -> None: ...
    def clear_accessor_cache(self) -> None: ...
    def connect_to_ion(self) -> None: ...
    def create_token(self, arg0: str) -> None: ...
    def credits_available(self) -> bool: ...
    def credits_start_next_frame(self) -> None: ...
    def get_asset_token_troubleshooting_details(self, *args, **kwargs) -> Any: ...
    def get_asset_troubleshooting_details(self, *args, **kwargs) -> Any: ...
    # Each credit is (html/text, shown-on-screen) per the tuple typing below.
    def get_credits(self) -> List[Tuple[str, bool]]: ...
    def get_default_token_troubleshooting_details(self, *args, **kwargs) -> Any: ...
    def get_render_statistics(self, *args, **kwargs) -> Any: ...
    def get_server_path(self) -> str: ...
    def get_server_paths(self) -> List[str]: ...
    def get_session(self, *args, **kwargs) -> Any: ...
    def get_sessions(self, *args, **kwargs) -> Any: ...
    def get_set_default_token_result(self, *args, **kwargs) -> Any: ...
    def is_default_token_set(self) -> bool: ...
    def is_tracing_enabled(self) -> bool: ...
    def on_shutdown(self) -> None: ...
    def on_stage_change(self, arg0: int) -> None: ...
    def on_startup(self, arg0: str) -> None: ...
    # NOTE(review): ViewportPythonBinding is not defined anywhere in this stub
    # (the class below is named Viewport) — confirm the generated name.
    def on_update_frame(self, arg0: List[ViewportPythonBinding], arg1: bool) -> None: ...
    def print_fabric_stage(self) -> str: ...
    def reload_tileset(self, arg0: str) -> None: ...
    def select_token(self, arg0: str, arg1: str) -> None: ...
    def specify_token(self, arg0: str) -> None: ...
    @overload
    def update_troubleshooting_details(self, arg0: str, arg1: int, arg2: int, arg3: int) -> None: ...
    @overload
    def update_troubleshooting_details(self, arg0: str, arg1: int, arg2: int, arg3: int, arg4: int) -> None: ...
class Profile:
    """Signed-in ion user's numeric id and username."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def id(self) -> int: ...
    @property
    def username(self) -> str: ...
class RenderStatistics:
    """Per-frame tileset rendering and loading counters reported by the native plugin."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def culled_tiles_visited(self) -> int: ...
    @property
    def geometries_capacity(self) -> int: ...
    @property
    def geometries_loaded(self) -> int: ...
    @property
    def geometries_rendered(self) -> int: ...
    @property
    def materials_capacity(self) -> int: ...
    @property
    def materials_loaded(self) -> int: ...
    @property
    def max_depth_visited(self) -> int: ...
    @property
    def tiles_culled(self) -> int: ...
    @property
    def tiles_loaded(self) -> int: ...
    @property
    def tiles_loading_main(self) -> int: ...
    @property
    def tiles_loading_worker(self) -> int: ...
    @property
    def tiles_rendered(self) -> int: ...
    @property
    def tiles_visited(self) -> int: ...
    @property
    def tileset_cached_bytes(self) -> int: ...
    @property
    def triangles_loaded(self) -> int: ...
    @property
    def triangles_rendered(self) -> int: ...
class SetDefaultTokenResult:
    """Result code and human-readable message from a set-default-token request."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def code(self) -> int: ...
    @property
    def message(self) -> str: ...
class Token:
    """A Cesium ion access-token record."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def id(self) -> str: ...
    @property
    def is_default(self) -> bool: ...
    @property
    def name(self) -> str: ...
    @property
    def token(self) -> str: ...
class TokenTroubleshootingDetails:
    """Diagnostic flags describing a token's validity and asset access."""

    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def allows_access_to_asset(self) -> bool: ...
    @property
    def associated_with_user_account(self) -> bool: ...
    @property
    def is_valid(self) -> bool: ...
    @property
    def show_details(self) -> bool: ...
    @property
    def token(self) -> Token: ...
class Viewport:
    """Viewport description (view/projection matrices and pixel size).

    NOTE(review): Matrix4d is not defined or imported in this stub —
    presumably pxr.Gf.Matrix4d; confirm against the binding source.
    """

    height: float
    projMatrix: Matrix4d
    viewMatrix: Matrix4d
    width: float
    def __init__(self) -> None: ...
# Loads (or returns the already-loaded) native plugin interface; it is a singleton.
def acquire_cesium_omniverse_interface(
    plugin_name: str = ..., library_path: str = ...
) -> ICesiumOmniverseInterface: ...
def release_cesium_omniverse_interface(arg0: ICesiumOmniverseInterface) -> None: ...
| 6,082 | unknown | 32.423077 | 112 | 0.582539 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/raster_overlay_to_add.py | from __future__ import annotations
from typing import Optional
import carb.events
class RasterOverlayToAdd:
    """Event-payload model describing a raster overlay to attach to a tileset."""

    def __init__(self, tileset_path: str, raster_overlay_ion_asset_id: int, raster_overlay_name: str):
        self.tileset_path = tileset_path
        self.raster_overlay_ion_asset_id = raster_overlay_ion_asset_id
        self.raster_overlay_name = raster_overlay_name

    def to_dict(self) -> dict:
        """Serialize this model into a dict suitable for a carb event payload."""
        result = {"tileset_path": self.tileset_path}
        result["raster_overlay_ion_asset_id"] = self.raster_overlay_ion_asset_id
        result["raster_overlay_name"] = self.raster_overlay_name
        return result

    @staticmethod
    def from_event(event: carb.events.IEvent) -> Optional[RasterOverlayToAdd]:
        """Rebuild a model from a carb event; returns None when the payload is missing or empty."""
        payload = event.payload
        if payload is None or len(payload) == 0:
            return None
        return RasterOverlayToAdd(
            payload["tileset_path"],
            payload["raster_overlay_ion_asset_id"],
            payload["raster_overlay_name"],
        )
| 1,004 | Python | 33.655171 | 102 | 0.639442 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/__init__.py | from .asset_to_add import AssetToAdd # noqa: F401
from .raster_overlay_to_add import RasterOverlayToAdd # noqa: F401
| 119 | Python | 38.999987 | 67 | 0.773109 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/asset_to_add.py | from __future__ import annotations
from typing import Optional
import carb.events
class AssetToAdd:
    """Event-payload model describing an ion tileset (and optional raster overlay) to add."""

    def __init__(
        self,
        tileset_name: str,
        tileset_ion_asset_id: int,
        raster_overlay_name: Optional[str] = None,
        raster_overlay_ion_asset_id: Optional[int] = None,
    ):
        self.tileset_name = tileset_name
        self.tileset_ion_asset_id = tileset_ion_asset_id
        self.raster_overlay_name = raster_overlay_name
        self.raster_overlay_ion_asset_id = raster_overlay_ion_asset_id

    def to_dict(self) -> dict:
        """Serialize this model into a dict suitable for a carb event payload."""
        result = {"tileset_name": self.tileset_name}
        result["tileset_ion_asset_id"] = self.tileset_ion_asset_id
        result["raster_overlay_name"] = self.raster_overlay_name
        result["raster_overlay_ion_asset_id"] = self.raster_overlay_ion_asset_id
        return result

    @staticmethod
    def from_event(event: carb.events.IEvent) -> Optional[AssetToAdd]:
        """Rebuild a model from a carb event; returns None when the payload is missing or empty."""
        payload = event.payload
        if payload is None or len(payload) == 0:
            return None
        return AssetToAdd(
            payload["tileset_name"],
            payload["tileset_ion_asset_id"],
            payload["raster_overlay_name"],
            payload["raster_overlay_ion_asset_id"],
        )
| 1,259 | Python | 32.157894 | 76 | 0.609214 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/tests/raster_overlay_to_add_test.py | import carb.events
import omni.kit.test
from unittest.mock import MagicMock
from cesium.omniverse.models.raster_overlay_to_add import RasterOverlayToAdd
# Shared fixture values for the RasterOverlayToAdd tests below.
TILESET_PATH = "/fake/tileset/path"
RASTER_OVERLAY_NAME = "fake_raster_overlay_name"
RASTER_OVERLAY_ION_ASSET_ID = 2

# Payload shaped like the dict RasterOverlayToAdd.to_dict() produces.
PAYLOAD_DICT = {
    "tileset_path": TILESET_PATH,
    "raster_overlay_name": RASTER_OVERLAY_NAME,
    "raster_overlay_ion_asset_id": RASTER_OVERLAY_ION_ASSET_ID,
}
class RasterOverlayToAddTest(omni.kit.test.AsyncTestCase):
    """Round-trip tests for the RasterOverlayToAdd event-payload model."""

    async def setUp(self):
        pass

    async def tearDown(self):
        pass

    async def test_convert_raster_overlay_to_add_to_dict(self):
        model = RasterOverlayToAdd(
            tileset_path=TILESET_PATH,
            raster_overlay_ion_asset_id=RASTER_OVERLAY_ION_ASSET_ID,
            raster_overlay_name=RASTER_OVERLAY_NAME,
        )

        as_dict = model.to_dict()

        self.assertEqual(as_dict["tileset_path"], TILESET_PATH)
        self.assertEqual(as_dict["raster_overlay_name"], RASTER_OVERLAY_NAME)
        self.assertEqual(as_dict["raster_overlay_ion_asset_id"], RASTER_OVERLAY_ION_ASSET_ID)

    async def test_create_raster_overlay_to_add_from_event(self):
        fake_event = MagicMock(spec=carb.events.IEvent)
        fake_event.payload = PAYLOAD_DICT

        model = RasterOverlayToAdd.from_event(fake_event)

        self.assertIsNotNone(model)
        self.assertEqual(model.tileset_path, TILESET_PATH)
        self.assertEqual(model.raster_overlay_name, RASTER_OVERLAY_NAME)
        self.assertEqual(model.raster_overlay_ion_asset_id, RASTER_OVERLAY_ION_ASSET_ID)

    async def test_create_raster_overlay_to_add_from_empty_event(self):
        fake_event = MagicMock(spec=carb.events.IEvent)
        fake_event.payload = None

        self.assertIsNone(RasterOverlayToAdd.from_event(fake_event))
| 1,998 | Python | 39.795918 | 104 | 0.707708 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/tests/__init__.py | from .asset_to_add_test import AssetToAddTest # noqa: F401
from .raster_overlay_to_add_test import RasterOverlayToAddTest # noqa: F401
| 137 | Python | 44.999985 | 76 | 0.788321 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/tests/asset_to_add_test.py | import carb.events
import omni.kit.test
from unittest.mock import MagicMock
from cesium.omniverse.models.asset_to_add import AssetToAdd
# Shared fixture values for the AssetToAdd tests below.
TILESET_NAME = "fake_tileset_name"
TILESET_ION_ASSET_ID = 1
RASTER_OVERLAY_NAME = "fake_raster_overlay_name"
RASTER_OVERLAY_ION_ASSET_ID = 2

# Payload shaped like the dict AssetToAdd.to_dict() produces.
PAYLOAD_DICT = {
    "tileset_name": TILESET_NAME,
    "tileset_ion_asset_id": TILESET_ION_ASSET_ID,
    "raster_overlay_name": RASTER_OVERLAY_NAME,
    "raster_overlay_ion_asset_id": RASTER_OVERLAY_ION_ASSET_ID,
}
class AssetToAddTest(omni.kit.test.AsyncTestCase):
    """Round-trip tests for the AssetToAdd event-payload model."""

    async def setUp(self):
        pass

    async def tearDown(self):
        pass

    async def test_convert_asset_to_add_to_dict(self):
        model = AssetToAdd(
            tileset_name=TILESET_NAME,
            tileset_ion_asset_id=TILESET_ION_ASSET_ID,
            raster_overlay_name=RASTER_OVERLAY_NAME,
            raster_overlay_ion_asset_id=RASTER_OVERLAY_ION_ASSET_ID,
        )

        as_dict = model.to_dict()

        self.assertEqual(as_dict["tileset_name"], TILESET_NAME)
        self.assertEqual(as_dict["tileset_ion_asset_id"], TILESET_ION_ASSET_ID)
        self.assertEqual(as_dict["raster_overlay_name"], RASTER_OVERLAY_NAME)
        self.assertEqual(as_dict["raster_overlay_ion_asset_id"], RASTER_OVERLAY_ION_ASSET_ID)

    async def test_create_asset_to_add_from_event(self):
        fake_event = MagicMock(spec=carb.events.IEvent)
        fake_event.payload = PAYLOAD_DICT

        model = AssetToAdd.from_event(fake_event)

        self.assertIsNotNone(model)
        self.assertEqual(model.tileset_name, TILESET_NAME)
        self.assertEqual(model.tileset_ion_asset_id, TILESET_ION_ASSET_ID)
        self.assertEqual(model.raster_overlay_name, RASTER_OVERLAY_NAME)
        self.assertEqual(model.raster_overlay_ion_asset_id, RASTER_OVERLAY_ION_ASSET_ID)

    async def test_create_asset_to_add_from_empty_event(self):
        fake_event = MagicMock(spec=carb.events.IEvent)
        fake_event.payload = None

        self.assertIsNone(AssetToAdd.from_event(fake_event))
| 2,131 | Python | 38.481481 | 95 | 0.689817 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/tests/__init__.py | # For Python testing within Omniverse, it only looks in the `.tests` submodule in whatever is defined
# as an extensions Python module. For organization purposes, we then import all of our tests from our other
# testing submodules.
from .extension_test import * # noqa: F401 F403
from ..models.tests import * # noqa: F401 F403
from ..ui.tests import * # noqa: F401 F403
from ..ui.models.tests import * # noqa: F401 F403
| 424 | Python | 52.124993 | 107 | 0.742925 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/tests/utils.py | from pathlib import Path
import omni.kit.app
def get_golden_img_dir():
    """Return the directory holding golden images for the pass/fail widget UI tests."""
    ext_manager = omni.kit.app.get_app().get_extension_manager()
    ext_id = ext_manager.get_extension_id_by_module("cesium.omniverse")
    ext_root = Path(ext_manager.get_extension_path(ext_id))
    return ext_root / "images/tests/ui/pass_fail_widget"
async def wait_for_update(wait_frames=10):
    """Yield to the Kit event loop for *wait_frames* update frames."""
    frames_remaining = wait_frames
    while frames_remaining > 0:
        await omni.kit.app.get_app().next_update_async()
        frames_remaining -= 1
| 434 | Python | 30.071426 | 96 | 0.709677 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/tests/extension_test.py | import omni.kit.test
import omni.kit.ui_test as ui_test
import omni.usd
import pxr.Usd
import cesium.usd
from typing import Optional
_window_ref: Optional[ui_test.WidgetRef] = None
class ExtensionTest(omni.kit.test.AsyncTestCase):
    """UI smoke tests for the Cesium for Omniverse extension window.

    The shared module-level ``_window_ref`` is (re)captured in setUp.
    """

    async def setUp(self):
        global _window_ref
        # can be removed (or at least decreased) once there is no delay
        # required before spawning the cesium window. See:
        # https://github.com/CesiumGS/cesium-omniverse/pull/423
        await ui_test.wait_n_updates(24)
        _window_ref = ui_test.find("Cesium")

    async def tearDown(self):
        pass

    async def test_cesium_window_opens(self):
        """The main Cesium window should exist after startup."""
        global _window_ref
        self.assertIsNotNone(_window_ref)

    async def test_window_docked(self):
        """The Cesium window should be docked once focused."""
        global _window_ref
        # docked is false if the window is not in focus,
        # as may be the case if other extensions are loaded
        await _window_ref.focus()
        self.assertTrue(_window_ref.window.docked)

    async def test_blank_tileset(self):
        """Clicking 'Blank 3D Tiles Tileset' should add a Tileset prim to the stage."""
        global _window_ref
        blankTilesetButton = _window_ref.find("**/Button[*].text=='Blank 3D Tiles Tileset'")
        self.assertIsNotNone(blankTilesetButton)
        stage: pxr.Usd.Stage = omni.usd.get_context().get_stage()
        self.assertIsNotNone(stage)
        # No tileset exists before the click...
        self.assertFalse(any([i.IsA(cesium.usd.plugins.CesiumUsdSchemas.Tileset) for i in stage.Traverse()]))
        await blankTilesetButton.click()
        await ui_test.wait_n_updates(2)  # passes without, but seems prudent
        # ...and exactly the click should have created one.
        self.assertTrue(any([i.IsA(cesium.usd.plugins.CesiumUsdSchemas.Tileset) for i in stage.Traverse()]))
| 1,657 | Python | 30.283018 | 109 | 0.67411 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/utils/custom_fields.py | import omni.ui as ui
READ_ONLY_STYLE = {"color": ui.color("#888888")}
def string_field_with_label(label_text, model=None, enabled=True):
    """Create a labeled single-line string field.

    Args:
        label_text: Text shown in the label to the left of the field.
        model: Optional value model to bind to the field.
        enabled: When False the field is read-only and styled as such.

    Returns:
        The created ui.StringField.
    """
    with ui.HStack(spacing=4, height=20):
        ui.Label(label_text, height=20, width=100)
        field = ui.StringField(height=20, enabled=enabled)
        if not enabled:
            field.style = READ_ONLY_STYLE
        # Compare against None explicitly so an unusual falsy-but-valid model
        # object would still be bound.
        if model is not None:
            field.model = model
        return field
def int_field_with_label(label_text, model=None, enabled=True):
    """Create a labeled integer field.

    Args:
        label_text: Text shown in the label to the left of the field.
        model: Optional value model to bind to the field.
        enabled: When False the field is read-only and styled as such.

    Returns:
        The created ui.IntField.
    """
    with ui.HStack(spacing=4, height=20):
        ui.Label(label_text, height=20, width=100)
        field = ui.IntField(height=20, enabled=enabled)
        if not enabled:
            field.style = READ_ONLY_STYLE
        # Compare against None explicitly so an unusual falsy-but-valid model
        # object would still be bound.
        if model is not None:
            field.model = model
        return field
def float_field_with_label(label_text, model=None, enabled=True):
    """Create a labeled float field (7-digit precision).

    Args:
        label_text: Text shown in the label to the left of the field.
        model: Optional value model to bind to the field.
        enabled: When False the field is read-only and styled as such.

    Returns:
        The created ui.FloatField.
    """
    with ui.HStack(spacing=4, height=20):
        ui.Label(label_text, height=20, width=100)
        field = ui.FloatField(height=20, enabled=enabled, precision=7)
        if not enabled:
            field.style = READ_ONLY_STYLE
        # Compare against None explicitly so an unusual falsy-but-valid model
        # object would still be bound.
        if model is not None:
            field.model = model
        return field
| 1,150 | Python | 30.108107 | 70 | 0.616522 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/utils/utils.py | from typing import Optional, Callable
import omni.usd
import omni.kit
import omni.ui as ui
async def wait_n_frames(n: int) -> None:
    """Suspend until the Kit app has processed *n* update frames."""
    # range(n) instead of range(0, n); the loop variable was unused.
    for _ in range(n):
        await omni.kit.app.get_app().next_update_async()
async def dock_window_async(
    window: Optional[ui.Window], target: str = "Stage", position: ui.DockPosition = ui.DockPosition.SAME
) -> None:
    """Dock *window* into the workspace window named *target*.

    No-op when *window* is None. Waits a few frames first so the UI has
    settled before docking.
    """
    if window is None:
        return

    # Give the UI five frames to settle before docking.
    await wait_n_frames(5)

    dock_target = ui.Workspace.get_window(target)
    window.dock_in(dock_target, position, 1)
    window.focus()
async def perform_action_after_n_frames_async(n: int, action: Callable[[], None]) -> None:
    """Invoke *action* once *n* Kit update frames have elapsed."""
    await wait_n_frames(n)
    action()
def str_is_empty_or_none(s: Optional[str]) -> bool:
    """Return True when *s* is None or the empty string."""
    return s is None or s == ""
| 848 | Python | 21.342105 | 104 | 0.646226 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/utils/cesium_interface.py | from ..bindings import acquire_cesium_omniverse_interface
class CesiumInterfaceManager:
    """Context manager handing out the singleton Cesium Omniverse native interface.

    Exiting the context deliberately does NOT release the interface; release
    happens when the plugin itself is torn down.
    """

    def __init__(self):
        # Acquires the interface. Is a singleton.
        self.interface = acquire_cesium_omniverse_interface()

    def __enter__(self):
        return self.interface

    def __exit__(self, exc_type, exc_val, exc_tb):
        # We release the interface when we pull down the plugin.
        pass
| 412 | Python | 26.533332 | 64 | 0.657767 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/config/extension.toml | [package]
version = "0.19.0"
category = "simulation"
feature = false
app = false
title = "Cesium for Omniverse"
description = "High-accuracy full-scale WGS84 globe for Omniverse"
authors = "Cesium GS Inc."
repository = "https://github.com/CesiumGS/cesium-omniverse"
keywords = [
"cesium",
"omniverse",
"geospatial",
"3D Tiles",
"glTF",
"globe",
"earth",
"simulation",
]
# Paths are relative to the extension folder
changelog = "doc/CHANGES.md"
readme = "doc/README.md"
preview_image = "doc/resources/preview.jpg"
icon = "doc/resources/icon.png"
[package.target]
kit = ["105.1"]
[package.writeTarget]
kit = true
python = false
# Which extensions this extension depends on
[dependencies]
"cesium.usd.plugins" = { version = "0.4.0" }
"usdrt.scenegraph" = {}
"omni.ui" = {}
"omni.usd" = {}
"omni.ui.scene" = {}
"omni.usd.libs" = {}
"omni.kit.commands" = {}
"omni.kit.pipapi" = {}
"omni.kit.uiapp" = {}
"omni.kit.viewport.utility" = {}
"omni.kit.property.usd" = {}
"omni.kit.menu.utils" = {}
"omni.kit.capture.viewport" = {}
# Main python module this extension provides, it will be publicly available as "import cesium.omniverse"
[[python.module]]
name = "cesium.omniverse"
[python.pipapi]
archiveDirs = ["vendor"]
[[native.plugin]]
path = "bin/cesium.omniverse.plugin"
[settings]
exts."cesium.omniverse".defaultAccessToken = ""
persistent.exts."cesium.omniverse".userAccessToken = ""
exts."cesium.omniverse".showOnStartup = true
[[test]]
args = [
"--/renderer/enabled=rtx",
"--/renderer/active=rtx",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--/app/file/ignoreUnsavedOnExit=true",
]
dependencies = [
"omni.hydra.pxr",
"omni.kit.mainwindow",
"omni.kit.ui_test",
"omni.kit.test_suite.helpers",
"omni.kit.window.file",
"omni.kit.viewport.window",
]
pythonTests.include = ["cesium.omniverse.*"]
pythonTests.exclude = []
pythonTests.unreliable = [
"*test_window_docked", # window does not dock when tests run from the empty test kit via the omniverse app
]
timeout = 180
| 2,088 | TOML | 22.47191 | 110 | 0.674808 |
CesiumGS/cesium-omniverse/apps/exts/cesium.performance.app/cesium/performance/app/extension.py | from functools import partial
import asyncio
import time
from typing import Callable, List, Optional
import logging
import carb.events
import omni.ext
import omni.ui as ui
import omni.usd
import omni.kit.app as app
import omni.kit.ui
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Sdf, UsdGeom
from .performance_window import CesiumPerformanceWindow
from .fps_sampler import FpsSampler
from cesium.omniverse.bindings import acquire_cesium_omniverse_interface, release_cesium_omniverse_interface
from cesium.omniverse.utils import wait_n_frames, dock_window_async
from cesium.usd.plugins.CesiumUsdSchemas import (
Data as CesiumData,
Georeference as CesiumGeoreference,
IonRasterOverlay as CesiumIonRasterOverlay,
Tileset as CesiumTileset,
Tokens as CesiumTokens,
)
# NOTE(review): a long-lived ion access token is committed here in plain text.
# May be acceptable for an internal performance-test app, but consider loading
# it from settings instead.
ION_ACCESS_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJqdGkiOiI0Y2ZjNzY3NC04MWIyLTQyN2ItODg3Zi0zYzk3MmQxZWYxMmIiLCJpZCI6MjU5LCJpYXQiOjE3MTE5NzkyNzl9.GuvRiyuJO14zjA5_mIwgocOShmF4EUj2xbmikcCeXxs"  # noqa: E501
GOOGLE_3D_TILES_ION_ID = 2275207

# Well-known prim paths used by every scenario.
CESIUM_DATA_PRIM_PATH = "/Cesium"
CESIUM_GEOREFERENCE_PRIM_PATH = "/CesiumGeoreference"
CESIUM_CAMERA_PATH = "/Camera"
class CesiumPerformanceExtension(omni.ext.IExt):
    def __init__(self):
        """Initialize the logger, subscription slots, and scenario run state."""
        super().__init__()

        self._logger = logging.getLogger(__name__)

        self._performance_window: Optional[CesiumPerformanceWindow] = None

        # One subscription slot per message-bus event; all wired up in on_startup.
        self._view_new_york_city_subscription: Optional[carb.events.ISubscription] = None
        self._view_paris_subscription: Optional[carb.events.ISubscription] = None
        self._view_grand_canyon_subscription: Optional[carb.events.ISubscription] = None
        self._view_tour_subscription: Optional[carb.events.ISubscription] = None
        self._view_new_york_city_google_subscription: Optional[carb.events.ISubscription] = None
        self._view_paris_google_subscription: Optional[carb.events.ISubscription] = None
        self._view_grand_canyon_google_subscription: Optional[carb.events.ISubscription] = None
        self._view_tour_google_subscription: Optional[carb.events.ISubscription] = None
        self._stop_subscription: Optional[carb.events.ISubscription] = None
        self._on_stage_subscription: Optional[carb.events.ISubscription] = None
        self._update_frame_subscription: Optional[carb.events.ISubscription] = None
        self._tileset_loaded_subscription: Optional[carb.events.ISubscription] = None

        # State of the currently running performance scenario.
        self._camera_path: Optional[str] = None
        self._tileset_path: Optional[str] = None
        self._active: bool = False
        self._start_time: float = 0.0
        self._fps_sampler: FpsSampler = FpsSampler()
    def on_startup(self):
        """Acquire the native interface, build the UI, and wire up all event subscriptions."""
        global _cesium_omniverse_interface
        _cesium_omniverse_interface = acquire_cesium_omniverse_interface()

        self._setup_menus()
        self._show_and_dock_startup_windows()

        # Message-bus subscriptions: one per scenario event.
        bus = app.get_app().get_message_bus_event_stream()

        view_new_york_city_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY")
        self._view_new_york_city_subscription = bus.create_subscription_to_pop_by_type(
            view_new_york_city_event, self._view_new_york_city
        )

        view_paris_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS")
        self._view_paris_subscription = bus.create_subscription_to_pop_by_type(view_paris_event, self._view_paris)

        view_grand_canyon_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON")
        self._view_grand_canyon_subscription = bus.create_subscription_to_pop_by_type(
            view_grand_canyon_event, self._view_grand_canyon
        )

        view_tour_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR")
        self._view_tour_subscription = bus.create_subscription_to_pop_by_type(view_tour_event, self._view_tour)

        view_new_york_city_google_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY_GOOGLE")
        self._view_new_york_city_google_subscription = bus.create_subscription_to_pop_by_type(
            view_new_york_city_google_event, self._view_new_york_city_google
        )

        view_paris_google_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS_GOOGLE")
        self._view_paris_google_subscription = bus.create_subscription_to_pop_by_type(
            view_paris_google_event, self._view_paris_google
        )

        view_grand_canyon_google_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON_GOOGLE")
        self._view_grand_canyon_google_subscription = bus.create_subscription_to_pop_by_type(
            view_grand_canyon_google_event, self._view_grand_canyon_google
        )

        view_tour_google_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR_GOOGLE")
        self._view_tour_google_subscription = bus.create_subscription_to_pop_by_type(
            view_tour_google_event, self._view_tour_google
        )

        stop_event = carb.events.type_from_string("cesium.performance.STOP")
        self._stop_subscription = bus.create_subscription_to_pop_by_type(stop_event, self._on_stop)

        # A stage may already be open before startup; run the opened handler now.
        usd_context = omni.usd.get_context()
        if usd_context.get_stage_state() == omni.usd.StageState.OPENED:
            self._on_stage_opened()

        self._on_stage_subscription = usd_context.get_stage_event_stream().create_subscription_to_pop(
            self._on_stage_event, name="cesium.performance.ON_STAGE_EVENT"
        )

        update_stream = app.get_app().get_update_event_stream()
        self._update_frame_subscription = update_stream.create_subscription_to_pop(
            self._on_update_frame, name="cesium.performance.ON_UPDATE_FRAME"
        )
def on_shutdown(self):
self._clear_scene()
if self._view_new_york_city_subscription is not None:
self._view_new_york_city_subscription.unsubscribe()
self._view_new_york_city_subscription = None
if self._view_paris_subscription is not None:
self._view_paris_subscription.unsubscribe()
self._view_paris_subscription = None
if self._view_grand_canyon_subscription is not None:
self._view_grand_canyon_subscription.unsubscribe()
self._view_grand_canyon_subscription = None
if self._view_tour_subscription is not None:
self._view_tour_subscription.unsubscribe()
self._view_tour_subscription = None
if self._view_new_york_city_google_subscription is not None:
self._view_new_york_city_google_subscription.unsubscribe()
self._view_new_york_city_google_subscription = None
if self._view_paris_google_subscription is not None:
self._view_paris_google_subscription.unsubscribe()
self._view_paris_google_subscription = None
if self._view_grand_canyon_google_subscription is not None:
self._view_grand_canyon_google_subscription.unsubscribe()
self._view_grand_canyon_google_subscription = None
if self._view_tour_google_subscription is not None:
self._view_tour_google_subscription.unsubscribe()
self._view_tour_google_subscription = None
if self._stop_subscription is not None:
self._stop_subscription.unsubscribe()
self._stop_subscription = None
if self._on_stage_subscription is not None:
self._on_stage_subscription.unsubscribe()
self._on_stage_subscription = None
if self._update_frame_subscription is not None:
self._update_frame_subscription.unsubscribe()
self._update_frame_subscription = None
self._fps_sampler.destroy()
self._destroy_performance_window()
release_cesium_omniverse_interface(_cesium_omniverse_interface)
def _setup_menus(self):
ui.Workspace.set_show_window_fn(
CesiumPerformanceWindow.WINDOW_NAME, partial(self._show_performance_window, None)
)
editor_menu = omni.kit.ui.get_editor_menu()
if editor_menu:
editor_menu.add_item(
CesiumPerformanceWindow.MENU_PATH, self._show_performance_window, toggle=True, value=True
)
    def _show_and_dock_startup_windows(self):
        """Show the performance window and dock it next to the Property panel."""
        ui.Workspace.show_window(CesiumPerformanceWindow.WINDOW_NAME)
        asyncio.ensure_future(dock_window_async(self._performance_window, target="Property"))
def _destroy_performance_window(self):
if self._performance_window is not None:
self._performance_window.destroy()
self._performance_window = None
async def _destroy_window_async(self, path):
# Wait one frame, this is due to the one frame defer in Window::_moveToMainOSWindow()
await wait_n_frames(1)
if path is CesiumPerformanceWindow.MENU_PATH:
self._destroy_performance_window()
    def _visibility_changed_fn(self, path, visible):
        """Keep the editor-menu checkbox in sync and destroy the window when hidden."""
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.set_value(path, visible)
        if not visible:
            asyncio.ensure_future(self._destroy_window_async(path))
    def _show_performance_window(self, _menu, value):
        """Menu/workspace callback: create the window when *value* is truthy, else hide it."""
        if value:
            self._performance_window = CesiumPerformanceWindow(_cesium_omniverse_interface, width=300, height=400)
            self._performance_window.set_visibility_changed_fn(
                partial(self._visibility_changed_fn, CesiumPerformanceWindow.MENU_PATH)
            )
        elif self._performance_window is not None:
            self._performance_window.visible = False
def _on_update_frame(self, _e: carb.events.IEvent):
if self._active is True:
duration = self._get_duration()
self._update_duration_ui(duration)
self._update_fps_ui(self._fps_sampler.get_fps())
    def _on_stage_event(self, _e: carb.events.IEvent):
        """Stage-event callback; reacts once a stage finishes opening."""
        usd_context = omni.usd.get_context()
        if usd_context.get_stage_state() == omni.usd.StageState.OPENED:
            self._on_stage_opened()
    def _on_stage_opened(self):
        """Ensure the scenario camera exists in the newly opened stage."""
        self._camera_path = self._create_camera(CESIUM_CAMERA_PATH)
    @staticmethod
    def _create_tileset_ion(path: str, asset_id: int, access_token: str) -> str:
        """Create a CesiumTileset prim sourced from ion and return its stage path.

        The requested *path* is uniquified so repeated runs don't collide with
        existing prims.
        """
        stage = omni.usd.get_context().get_stage()
        tileset_path = omni.usd.get_stage_next_free_path(stage, path, False)
        tileset = CesiumTileset.Define(stage, tileset_path)
        assert tileset.GetPrim().IsValid()

        tileset.GetIonAssetIdAttr().Set(asset_id)
        tileset.GetIonAccessTokenAttr().Set(access_token)
        tileset.GetSourceTypeAttr().Set(CesiumTokens.ion)

        return tileset_path
@staticmethod
def _create_tileset_google() -> str:
stage = omni.usd.get_context().get_stage()
tileset_path = omni.usd.get_stage_next_free_path(stage, "/Google_3D_Tiles", False)
tileset = CesiumTileset.Define(stage, tileset_path)
tileset.GetIonAssetIdAttr().Set(GOOGLE_3D_TILES_ION_ID)
tileset.GetIonAccessTokenAttr().Set(ION_ACCESS_TOKEN)
tileset.GetSourceTypeAttr().Set(CesiumTokens.ion)
return tileset_path
    @staticmethod
    def _create_raster_overlay_ion(path: str, asset_id: int, access_token: str) -> str:
        """Create an ion raster-overlay prim under a tileset and bind it to that tileset.

        *path* must be a child path of an existing CesiumTileset prim
        (see _get_raster_overlay_path); asserted below.
        """
        stage = omni.usd.get_context().get_stage()
        raster_overlay_path = omni.usd.get_stage_next_free_path(stage, path, False)
        raster_overlay = CesiumIonRasterOverlay.Define(stage, raster_overlay_path)
        assert raster_overlay.GetPrim().IsValid()

        # The parent prim is the owning tileset; register the overlay on its
        # raster-overlay binding relationship.
        parent = raster_overlay.GetPrim().GetParent()
        assert parent.IsA(CesiumTileset)
        tileset_prim = CesiumTileset.Get(stage, parent.GetPath())
        tileset_prim.GetRasterOverlayBindingRel().AddTarget(raster_overlay_path)

        raster_overlay.GetIonAssetIdAttr().Set(asset_id)
        raster_overlay.GetIonAccessTokenAttr().Set(access_token)

        return raster_overlay_path
@staticmethod
def _create_camera(path: str) -> str:
stage = omni.usd.get_context().get_stage()
if stage.GetPrimAtPath(path):
return path
camera = UsdGeom.Camera.Define(stage, path)
assert camera.GetPrim().IsValid()
camera.GetClippingRangeAttr().Set(Gf.Vec2f(1.0, 100000000.0))
return path
@staticmethod
def _get_raster_overlay_path(tileset_path: str, raster_overlay_name: str) -> str:
return Sdf.Path(tileset_path).AppendPath(raster_overlay_name).pathString
    @staticmethod
    def _set_georeference(longitude: float, latitude: float, height: float):
        """Point the shared CesiumGeoreference prim at the given origin.

        longitude/latitude presumably in degrees and height in meters —
        confirm against the CesiumGeoreference schema.
        """
        stage = omni.usd.get_context().get_stage()
        cesium_georeference = CesiumGeoreference.Get(stage, CESIUM_GEOREFERENCE_PRIM_PATH)
        assert cesium_georeference.GetPrim().IsValid()
        cesium_georeference.GetGeoreferenceOriginLongitudeAttr().Set(longitude)
        cesium_georeference.GetGeoreferenceOriginLatitudeAttr().Set(latitude)
        cesium_georeference.GetGeoreferenceOriginHeightAttr().Set(height)
    def _set_camera(
        self,
        translate: Gf.Vec3d,
        rotate: Gf.Vec3f,
        focal_length: float,
        horizontal_aperture: float,
        vertical_aperture: float,
    ):
        """Activate the scenario camera and apply its lens and transform settings."""
        stage = omni.usd.get_context().get_stage()

        # Make the scenario camera the active viewport camera.
        viewport = get_active_viewport()
        viewport.set_active_camera(self._camera_path)

        camera = UsdGeom.Camera.Get(stage, self._camera_path)
        camera.GetFocalLengthAttr().Set(focal_length)
        camera.GetHorizontalApertureAttr().Set(horizontal_aperture)
        camera.GetVerticalApertureAttr().Set(vertical_aperture)

        xform_common_api = UsdGeom.XformCommonAPI(camera.GetPrim())
        xform_common_api.SetTranslate(translate)
        xform_common_api.SetRotate(rotate, UsdGeom.XformCommonAPI.RotationOrderYXZ)
@staticmethod
def _get_tileset(path: str) -> CesiumTileset:
stage = omni.usd.get_context().get_stage()
tileset = CesiumTileset.Get(stage, path)
assert tileset.GetPrim().IsValid()
return tileset
    @staticmethod
    def _get_cesium_data() -> CesiumData:
        """Fetch the singleton CesiumData prim; asserts that it exists."""
        stage = omni.usd.get_context().get_stage()
        cesium_data = CesiumData.Get(stage, CESIUM_DATA_PRIM_PATH)
        assert cesium_data.GetPrim().IsValid()
        return cesium_data
    @staticmethod
    def _remove_prim(path: str):
        """Remove the prim at ``path`` from the current stage."""
        stage = omni.usd.get_context().get_stage()
        stage.RemovePrim(path)
    def _setup_location_new_york_city(self):
        """Georeference the scene at New York City (-74.0060, 40.7128) and apply
        a precomputed camera pose framing the location."""
        self._set_georeference(-74.0060, 40.7128, 50.0)
        self._set_camera(
            Gf.Vec3d(-176516.8372437113, 33877.019622553846, 197777.19771945066),
            Gf.Vec3f(-7.9392824, -37.71652, -6.0970836),
            18.14756,
            20.955,
            15.2908,
        )
    def _setup_location_paris(self):
        """Georeference the scene at Paris (2.3522, 48.8566) and apply a
        precomputed camera pose framing the location."""
        self._set_georeference(2.3522, 48.8566, 100.0)
        self._set_camera(
            Gf.Vec3d(-285275.1368718885, 780.3607448845705, 35392.91845506678),
            Gf.Vec3f(0.46399376, 65.245544, -1.0061567),
            18.14756,
            20.955,
            15.2908,
        )
    def _setup_location_grand_canyon(self):
        """Georeference the scene at the Grand Canyon (-112.3535, 36.2679) and
        apply a precomputed camera pose framing the location."""
        self._set_georeference(-112.3535, 36.2679, 2100.0)
        self._set_camera(
            Gf.Vec3d(-339866.7567928189, 27967.440239271935, -59650.894693908194),
            Gf.Vec3f(5.532731, -129.35608, -6.704948),
            18.14756,
            20.955,
            15.2908,
        )
    def _view_new_york_city(self, _e: carb.events.IEvent):
        """Benchmark scenario: Cesium World Terrain + Bing aerial imagery over New York City."""
        # warning() is used so the message is visible at default log verbosity.
        self._logger.warning("View New York City")
        self._clear_scene()
        self._setup_location_new_york_city()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        self._load_tileset(tileset_path, self._tileset_loaded)
    def _view_paris(self, _e: carb.events.IEvent):
        """Benchmark scenario: Cesium World Terrain + Bing aerial imagery over Paris."""
        self._logger.warning("View Paris")
        self._clear_scene()
        self._setup_location_paris()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        self._load_tileset(tileset_path, self._tileset_loaded)
    def _view_grand_canyon(self, _e: carb.events.IEvent):
        """Benchmark scenario: Cesium World Terrain + Bing aerial imagery over the Grand Canyon."""
        self._logger.warning("View Grand Canyon")
        self._clear_scene()
        self._setup_location_grand_canyon()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        self._load_tileset(tileset_path, self._tileset_loaded)
    def _view_tour(self, _e: carb.events.IEvent):
        """Benchmark scenario: load one ion tileset at three successive locations,
        timing each stop (driven by the Tour helper class)."""
        self._logger.warning("View Tour")
        self._clear_scene()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        def tour_stop_0():
            self._setup_location_new_york_city()
        def tour_stop_1():
            self._setup_location_paris()
        def tour_stop_2():
            self._setup_location_grand_canyon()
        # Tour advances to the next stop each time the TILESET_LOADED event fires,
        # then calls self._tileset_loaded after the final stop.
        tour = Tour(self, [tour_stop_0, tour_stop_1, tour_stop_2], self._tileset_loaded)
        self._load_tileset(tileset_path, tour.tour_stop_loaded)
    def _view_new_york_city_google(self, _e: carb.events.IEvent):
        """Benchmark scenario: the Google tileset variant over New York City."""
        self._logger.warning("View New York City Google")
        self._clear_scene()
        self._setup_location_new_york_city()
        tileset_path = self._create_tileset_google()
        self._load_tileset(tileset_path, self._tileset_loaded)
    def _view_paris_google(self, _e: carb.events.IEvent):
        """Benchmark scenario: the Google tileset variant over Paris."""
        self._logger.warning("View Paris Google")
        self._clear_scene()
        self._setup_location_paris()
        tileset_path = self._create_tileset_google()
        self._load_tileset(tileset_path, self._tileset_loaded)
    def _view_grand_canyon_google(self, _e: carb.events.IEvent):
        """Benchmark scenario: the Google tileset variant over the Grand Canyon."""
        self._logger.warning("View Grand Canyon Google")
        self._clear_scene()
        self._setup_location_grand_canyon()
        tileset_path = self._create_tileset_google()
        self._load_tileset(tileset_path, self._tileset_loaded)
    def _view_tour_google(self, _e: carb.events.IEvent):
        """Benchmark scenario: the Google tileset loaded at three successive locations,
        timing each stop (driven by the Tour helper class)."""
        self._logger.warning("View Tour Google")
        self._clear_scene()
        tileset_path = self._create_tileset_google()
        def tour_stop_0():
            self._setup_location_new_york_city()
        def tour_stop_1():
            self._setup_location_paris()
        def tour_stop_2():
            self._setup_location_grand_canyon()
        tour = Tour(self, [tour_stop_0, tour_stop_1, tour_stop_2], self._tileset_loaded)
        self._load_tileset(tileset_path, tour.tour_stop_loaded)
    def _load_tileset(self, tileset_path: str, tileset_loaded: Callable):
        """Apply the performance-window options to the tileset, subscribe
        ``tileset_loaded`` to the TILESET_LOADED event, and start timing/FPS sampling."""
        tileset = self._get_tileset(tileset_path)
        cesium_data = self._get_cesium_data()
        assert self._performance_window is not None
        bus = app.get_app().get_message_bus_event_stream()
        tileset_loaded_event = carb.events.type_from_string("cesium.omniverse.TILESET_LOADED")
        # Keep a handle so _stop() can unsubscribe when the run ends.
        self._tileset_loaded_subscription = bus.create_subscription_to_pop_by_type(
            tileset_loaded_event, tileset_loaded
        )
        # Read the run options the user picked in the performance window.
        random_colors = self._performance_window.get_random_colors()
        forbid_holes = self._performance_window.get_forbid_holes()
        frustum_culling = self._performance_window.get_frustum_culling()
        main_thread_loading_time_limit = self._performance_window.get_main_thread_loading_time_limit_model()
        cesium_data.GetDebugRandomColorsAttr().Set(random_colors)
        tileset.GetForbidHolesAttr().Set(forbid_holes)
        tileset.GetEnableFrustumCullingAttr().Set(frustum_culling)
        tileset.GetMainThreadLoadingTimeLimitAttr().Set(main_thread_loading_time_limit)
        # Remember the path so _clear_scene() can remove the prim next run.
        self._tileset_path = tileset_path
        self._active = True
        self._start_time = time.time()
        self._fps_sampler.start()
    def _tileset_loaded(self, _e: carb.events.IEvent):
        """Final TILESET_LOADED handler: stop measuring and publish results to the UI."""
        self._stop()
        duration = self._get_duration()
        self._update_duration_ui(duration)
        self._update_fps_mean_ui(self._fps_sampler.get_mean())
        self._update_fps_median_ui(self._fps_sampler.get_median())
        self._update_fps_low_ui(self._fps_sampler.get_low())
        self._update_fps_high_ui(self._fps_sampler.get_high())
        self._logger.warning("Loaded in {} seconds".format(duration))
def _get_duration(self) -> float:
current_time = time.time()
duration = current_time - self._start_time
return duration
    def _update_duration_ui(self, value: float):
        """Push the measured load duration (seconds) to the performance window, if open."""
        if self._performance_window is not None:
            self._performance_window.set_duration(value)
    def _update_fps_ui(self, value: float):
        """Push the live FPS value to the performance window, if open."""
        if self._performance_window is not None:
            self._performance_window.set_fps(value)
    def _update_fps_mean_ui(self, value: float):
        """Push the mean FPS statistic to the performance window, if open."""
        if self._performance_window is not None:
            self._performance_window.set_fps_mean(value)
    def _update_fps_median_ui(self, value: float):
        """Push the median FPS statistic to the performance window, if open."""
        if self._performance_window is not None:
            self._performance_window.set_fps_median(value)
    def _update_fps_low_ui(self, value: float):
        """Push the lowest sampled FPS to the performance window, if open."""
        if self._performance_window is not None:
            self._performance_window.set_fps_low(value)
    def _update_fps_high_ui(self, value: float):
        """Push the highest sampled FPS to the performance window, if open."""
        if self._performance_window is not None:
            self._performance_window.set_fps_high(value)
    def _clear_scene(self):
        """Stop any active run, zero all stats in the UI, and remove the previous tileset prim."""
        self._stop()
        self._update_duration_ui(0.0)
        self._update_fps_ui(0.0)
        self._update_fps_mean_ui(0.0)
        self._update_fps_median_ui(0.0)
        self._update_fps_low_ui(0.0)
        self._update_fps_high_ui(0.0)
        if self._tileset_path is not None:
            self._remove_prim(self._tileset_path)
    def _on_stop(self, _e: carb.events.IEvent):
        """Handler for the UI's Stop event: abort the active run."""
        self._stop()
    def _stop(self):
        """End the active measurement and release the TILESET_LOADED subscription."""
        self._active = False
        self._fps_sampler.stop()
        if self._tileset_loaded_subscription is not None:
            self._tileset_loaded_subscription.unsubscribe()
            self._tileset_loaded_subscription = None
class Tour:
    """Drives a multi-stop benchmark tour.

    Each tour stop repositions the georeference/camera; when the tileset
    finishes loading at a stop, tour_stop_loaded() logs the stop's load time
    and advances to the next stop, invoking ``tour_complete`` after the last.
    """

    def __init__(self, ext: CesiumPerformanceExtension, tour_stops: List[Callable], tour_complete: Callable):
        """
        :param ext: The owning extension (used for timing and logging).
        :param tour_stops: Non-empty list of callables, one per location.
        :param tour_complete: Invoked with the final event after the last stop loads.
        :raises ValueError: if ``tour_stops`` is empty.
        """
        # BUG FIX: validation used a bare assert, which is stripped under `python -O`;
        # raise explicitly instead, and do it before any state is set up.
        if not tour_stops:
            raise ValueError("tour_stops must contain at least one stop")
        self._ext: CesiumPerformanceExtension = ext
        self._tour_stops: List[Callable] = tour_stops
        self._tour_complete: Callable = tour_complete
        self._current_stop: int = 0
        self._duration: float = 0.0
        # Kick off the first stop immediately.
        tour_stops[0]()

    def tour_stop_loaded(self, _e: carb.events.IEvent):
        """TILESET_LOADED callback: log this stop's load time, then advance or finish."""
        duration = self._ext._get_duration()
        # The extension reports cumulative time; diff against the previous stop.
        current_duration = duration - self._duration
        self._duration = duration
        self._ext._logger.warning("Tour stop {} loaded in {} seconds".format(self._current_stop, current_duration))
        if self._current_stop == len(self._tour_stops) - 1:
            self._tour_complete(_e)
        else:
            self._current_stop += 1
            self._tour_stops[self._current_stop]()
| 23,749 | Python | 39.667808 | 212 | 0.652659 |
CesiumGS/cesium-omniverse/apps/exts/cesium.performance.app/cesium/performance/app/performance_window.py | import logging
import carb.events
import omni.kit.app as app
import omni.ui as ui
from cesium.omniverse.bindings import ICesiumOmniverseInterface
RANDOM_COLORS_TEXT = "Random colors"
FORBID_HOLES_TEXT = "Forbid holes"
FRUSTUM_CULLING_TEXT = "Frustum culling"
TRACING_ENABLED_TEXT = "Tracing enabled"
MAIN_THREAD_LOADING_TIME_LIMIT_TEXT = "Main thread loading time limit (ms)"
NEW_YORK_CITY_TEXT = "New York City"
PARIS_TEXT = "Paris"
GRAND_CANYON_TEXT = "Grand Canyon"
TOUR_TEXT = "Tour"
NEW_YORK_CITY_GOOGLE_TEXT = "New York City (Google)"
PARIS_GOOGLE_TEXT = "Paris (Google)"
GRAND_CANYON_GOOGLE_TEXT = "Grand Canyon (Google)"
TOUR_GOOGLE_TEXT = "Tour (Google)"
DURATION_TEXT = "Duration (seconds)"
FPS_TEXT = "FPS"
FPS_MEAN_TEXT = "FPS (mean)"
FPS_MEDIAN_TEXT = "FPS (median)"
FPS_LOW_TEXT = "FPS (low)"
FPS_HIGH_TEXT = "FPS (high)"
class CesiumPerformanceWindow(ui.Window):
    """UI window for driving Cesium tileset loading benchmarks.

    The window only collects run options and displays results; each scenario
    button pushes a ``cesium.performance.*`` event onto the app message bus
    for the performance extension to act on.
    """
    WINDOW_NAME = "Cesium Performance Testing"
    MENU_PATH = f"Window/Cesium/{WINDOW_NAME}"
    def __init__(self, cesium_omniverse_interface: ICesiumOmniverseInterface, **kwargs):
        """Create the window and the UI models backing its options and stats fields."""
        super().__init__(CesiumPerformanceWindow.WINDOW_NAME, **kwargs)
        self._cesium_omniverse_interface = cesium_omniverse_interface
        self._logger = logging.getLogger(__name__)
        # Run options (inputs).
        self._random_colors_checkbox_model = ui.SimpleBoolModel(False)
        self._forbid_holes_checkbox_model = ui.SimpleBoolModel(False)
        self._frustum_culling_checkbox_model = ui.SimpleBoolModel(True)
        self._main_thread_loading_time_limit_model = ui.SimpleFloatModel(0.0)
        # Run results (outputs, read-only in the UI).
        self._duration_model = ui.SimpleFloatModel(0.0)
        self._fps_model = ui.SimpleFloatModel(0.0)
        self._fps_mean_model = ui.SimpleFloatModel(0.0)
        self._fps_median_model = ui.SimpleFloatModel(0.0)
        self._fps_low_model = ui.SimpleFloatModel(0.0)
        self._fps_high_model = ui.SimpleFloatModel(0.0)
        # Defer widget construction to _build_fn (called when the frame builds).
        self.frame.set_build_fn(self._build_fn)
    def destroy(self) -> None:
        super().destroy()
    def _build_fn(self):
        """Build the window contents: options, scenario buttons, stats, and a Stop button."""
        with ui.VStack(spacing=10):
            with ui.VStack(spacing=4):
                with ui.HStack(height=16):
                    ui.Label("Options", height=0)
                    ui.Spacer()
                for label, model in [
                    (RANDOM_COLORS_TEXT, self._random_colors_checkbox_model),
                    (FORBID_HOLES_TEXT, self._forbid_holes_checkbox_model),
                    (FRUSTUM_CULLING_TEXT, self._frustum_culling_checkbox_model),
                ]:
                    with ui.HStack(height=0):
                        ui.Label(label, height=0)
                        ui.CheckBox(model)
                with ui.HStack(height=0):
                    ui.Label(MAIN_THREAD_LOADING_TIME_LIMIT_TEXT, height=0)
                    ui.StringField(self._main_thread_loading_time_limit_model)
                with ui.HStack(height=16):
                    tracing_label = ui.Label(TRACING_ENABLED_TEXT, height=0)
                    tracing_label.set_tooltip(
                        "Enabled when the project is configured with -D CESIUM_OMNI_ENABLE_TRACING=ON"
                    )
                    # Tracing is a build-time switch; just report its state.
                    enabled_string = "ON" if self._cesium_omniverse_interface.is_tracing_enabled() else "OFF"
                    ui.Label(enabled_string, height=0)
            with ui.VStack(spacing=0):
                ui.Label("Scenarios", height=16)
                for label, callback in [
                    (NEW_YORK_CITY_TEXT, self._view_new_york_city),
                    (PARIS_TEXT, self._view_paris),
                    (GRAND_CANYON_TEXT, self._view_grand_canyon),
                    (TOUR_TEXT, self._view_tour),
                    (NEW_YORK_CITY_GOOGLE_TEXT, self._view_new_york_city_google),
                    (PARIS_GOOGLE_TEXT, self._view_paris_google),
                    (GRAND_CANYON_GOOGLE_TEXT, self._view_grand_canyon_google),
                    (TOUR_GOOGLE_TEXT, self._view_tour_google),
                ]:
                    ui.Button(label, height=20, clicked_fn=callback)
            with ui.VStack(spacing=4):
                with ui.HStack(height=16):
                    ui.Label("Stats", height=0)
                    ui.Spacer()
                for label, model in [
                    (DURATION_TEXT, self._duration_model),
                    (FPS_TEXT, self._fps_model),
                    (FPS_MEAN_TEXT, self._fps_mean_model),
                    (FPS_MEDIAN_TEXT, self._fps_median_model),
                    (FPS_LOW_TEXT, self._fps_low_model),
                    (FPS_HIGH_TEXT, self._fps_high_model),
                ]:
                    with ui.HStack(height=0):
                        ui.Label(label, height=0)
                        ui.StringField(model=model, height=0, read_only=True)
            with ui.VStack(spacing=0):
                ui.Button("Stop", height=16, clicked_fn=self._stop)
    # Each handler below broadcasts its scenario event on the app message bus;
    # the performance extension subscribes to these event types.
    def _view_new_york_city(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_new_york_city_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY")
        bus.push(view_new_york_city_event)
    def _view_paris(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_paris_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS")
        bus.push(view_paris_event)
    def _view_grand_canyon(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_grand_canyon_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON")
        bus.push(view_grand_canyon_event)
    def _view_tour(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_tour_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR")
        bus.push(view_tour_event)
    def _view_new_york_city_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_new_york_city_google_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY_GOOGLE")
        bus.push(view_new_york_city_google_event)
    def _view_paris_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_paris_google_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS_GOOGLE")
        bus.push(view_paris_google_event)
    def _view_grand_canyon_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_grand_canyon_google_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON_GOOGLE")
        bus.push(view_grand_canyon_google_event)
    def _view_tour_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_tour_google_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR_GOOGLE")
        bus.push(view_tour_google_event)
    def _stop(self):
        bus = app.get_app().get_message_bus_event_stream()
        stop_event = carb.events.type_from_string("cesium.performance.STOP")
        bus.push(stop_event)
    # Option accessors read by the extension when a run starts.
    def get_random_colors(self) -> bool:
        return self._random_colors_checkbox_model.get_value_as_bool()
    def get_forbid_holes(self) -> bool:
        return self._forbid_holes_checkbox_model.get_value_as_bool()
    def get_frustum_culling(self) -> bool:
        return self._frustum_culling_checkbox_model.get_value_as_bool()
    def get_main_thread_loading_time_limit_model(self) -> float:
        return self._main_thread_loading_time_limit_model.get_value_as_float()
    # Result setters written by the extension when a run finishes.
    def set_duration(self, value: float):
        self._duration_model.set_value(value)
    def set_fps(self, value: float):
        self._fps_model.set_value(value)
    def set_fps_mean(self, value: float):
        self._fps_mean_model.set_value(value)
    def set_fps_median(self, value: float):
        self._fps_median_model.set_value(value)
    def set_fps_low(self, value: float):
        self._fps_low_model.set_value(value)
    def set_fps_high(self, value: float):
        self._fps_high_model.set_value(value)
| 8,013 | Python | 40.097436 | 118 | 0.606639 |
CesiumGS/cesium-omniverse/apps/exts/cesium.performance.app/cesium/performance/app/fps_sampler.py | import array
import time
import carb.events
import omni.kit.app as app
import statistics
from omni.kit.viewport.utility import get_active_viewport
FREQUENCY_IN_SECONDS: float = 0.025
class FpsSampler:
    """Samples the active viewport's FPS while a benchmark run is active.

    Between start() and stop(), a sample is taken on each app update frame at
    most once per FREQUENCY_IN_SECONDS; stop() reduces the samples to
    mean/median/low/high statistics.
    """
    def __init__(
        self,
    ):
        self._last_time: float = 0.0
        self._active: bool = False
        self._fps = 0.0
        # "f" = C floats; compact storage for the per-frame samples.
        self._samples = array.array("f")
        self._median: float = 0.0
        self._mean: float = 0.0
        self._low: float = 0.0
        self._high: float = 0.0
        self._viewport = get_active_viewport()
        update_stream = app.get_app().get_update_event_stream()
        self._update_frame_subscription = update_stream.create_subscription_to_pop(
            self._on_update_frame, name="cesium.performance.ON_UPDATE_FRAME"
        )
    def __del__(self):
        self.destroy()
    def destroy(self):
        # Idempotent: the frame subscription is released exactly once.
        if self._update_frame_subscription is not None:
            self._update_frame_subscription.unsubscribe()
            self._update_frame_subscription = None
    def start(self):
        """Begin collecting FPS samples on subsequent update frames."""
        self._last_time = time.time()
        self._active = True
    def stop(self):
        """Stop sampling and compute the statistics from the collected samples.

        NOTE(review): if no samples were collected, the statistics from the
        previous run are left in place -- confirm this is intended.
        """
        self._active = False
        if len(self._samples) > 0:
            self._mean = statistics.mean(self._samples)
            self._median = statistics.median(self._samples)
            self._low = min(self._samples)
            self._high = max(self._samples)
            self._samples = array.array("f")
    # The getters below are state-dependent, hence the asserts: the statistics
    # are valid only after stop(); the live fps only while sampling is active.
    def get_mean(self):
        assert not self._active
        return self._mean
    def get_median(self):
        assert not self._active
        return self._median
    def get_low(self):
        assert not self._active
        return self._low
    def get_high(self):
        assert not self._active
        return self._high
    def get_fps(self):
        assert self._active
        return self._fps
    def _on_update_frame(self, _e: carb.events.IEvent):
        """Per-frame callback: record the viewport FPS at most once per sampling interval."""
        if not self._active:
            return
        current_time = time.time()
        elapsed = current_time - self._last_time
        if elapsed > FREQUENCY_IN_SECONDS:
            fps = self._viewport.fps
            self._samples.append(fps)
            self._last_time = current_time
            self._fps = fps
| 2,243 | Python | 25.093023 | 83 | 0.576014 |
CesiumGS/cesium-omniverse/apps/exts/cesium.performance.app/config/extension.toml | [package]
title = "Cesium for Omniverse Performance Testing Extension"
version = "0.0.0"
app = false
toggleable = false
[dependencies]
"cesium.omniverse" = {}
[[python.module]]
name = "cesium.performance.app"
| 211 | TOML | 16.666665 | 60 | 0.725118 |
CesiumGS/cesium-omniverse/scripts/copy_to_exts.py | """
This file is a post build step run by cmake that copies over the CHANGES.md and related resources to
the exts/docs folder for packaging.
"""
import re
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import List
@dataclass
class PathPair:
    """
    Represents a source and relative destination pair.

    :arg source: The source path for the file.
    :arg relative_destination: The relative destination for the file.
    """

    source: Path
    # An empty string means "copy directly into the destination folder".
    relative_destination: str = ""
def find_resources(path: Path) -> List[PathPair]:
    """
    Finds all resources within a file and returns them as a list of PathPairs. The search is done using a regular
    expression looking for all links that contain the substring "docs/resources".

    NOTE: This **only** works with relative paths. Absolute paths in the file read will fail.

    :param path: The file to search.
    :return: A list containing PathPairs of all resources found in the file.
    """
    # BUG FIX: lazy quantifiers + finditer so that several images on one line are
    # each captured; the previous greedy pattern with a single search() returned at
    # most one (possibly mangled) match per line.
    regex = re.compile(r"!\[.*?]\((.*?docs/(resources.*?))\)")

    root_path = path.parent.resolve()
    resources: List[PathPair] = []

    with open(path.resolve(), "r") as f:
        for line in f.readlines():
            for match in regex.finditer(line):
                source = root_path.joinpath(match.group(1))
                relative_destination = match.group(2)
                resources.append(PathPair(source, relative_destination))

    return resources
def copy_to_destination(pair: PathPair, destination: Path) -> None:
    """
    Copies pair.source into the destination directory, honoring the pair's
    relative destination when one is set.

    NOTE: This uses shutils so if you're on a version of Python older than 3.8 this will be slow.

    :param pair: The PathPair for the copy operation.
    :param destination: The path of the destination directory.
    """
    if pair.relative_destination != "":
        target = destination.joinpath(pair.relative_destination)
    else:
        target = destination

    # A directory target means "keep the source file's own name".
    if target.is_dir():
        target = target.joinpath(pair.source.name)

    target.parent.mkdir(parents=True, exist_ok=True)
    shutil.copyfile(pair.source, target)
def main() -> int:
    """Copy CHANGES.md and every resource it references into the extension's doc folder.

    :return: 0 on success, 1 on failure (the exception is printed).
    """
    project_root = Path(__file__).parent.parent
    destination = project_root.joinpath("exts/cesium.omniverse/doc")
    changes_path = project_root.joinpath("CHANGES.md")
    try:
        # Turning off formatting here for readability.
        # fmt: off
        paths_to_copy: List[PathPair] = [
            PathPair(changes_path),
            *find_resources(changes_path)
        ]
        # fmt: on
        for pair in paths_to_copy:
            copy_to_destination(pair, destination)
    except Exception as e:
        print(e)
        return 1
    return 0


# IDIOM FIX: guard the entry point so importing this module (e.g. from a test)
# no longer performs the copy and exits the interpreter as a side effect.
if __name__ == "__main__":
    exit(main())
| 2,987 | Python | 29.489796 | 116 | 0.665216 |
CesiumGS/cesium-omniverse/scripts/vscode_build.py | #!/usr/bin/env python3
import sys
import subprocess
import multiprocessing
import os
import platform
import shutil
try:
import pty
except Exception:
pass
import webbrowser
from typing import List, NamedTuple
def is_windows():
    """True when running on a Windows host."""
    return platform.system() == "Windows"
def is_linux():
    """True when running on a Linux host."""
    return platform.system() == "Linux"
def process(cmd: List[str]):
    """Run a command, echoing it first, and abort the script if it fails.

    On Linux the command runs through a pseudo-terminal so child tools keep
    their colored output; elsewhere output is streamed line by line and a
    CalledProcessError is raised on a non-zero exit.
    """
    print("Run: " + " ".join(cmd))

    if is_linux():
        # Using pty instead of subprocess to get terminal colors
        # NOTE(review): pty.spawn returns a waitpid-style status, not a plain
        # exit code -- confirm sys.exit(result) reports the intended value.
        result = pty.spawn(cmd)
        if result != 0:
            sys.exit(result)
    else:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
        for line in p.stdout:
            print(line, end="")
        p.communicate()
        if p.returncode != 0:
            raise subprocess.CalledProcessError(p.returncode, p.args)
def open_browser(html: str):
    """Open a local HTML file in a new browser tab."""
    html = os.path.realpath(html)
    html = "file://{}".format(html)
    webbrowser.open(html, new=2)
class Args(NamedTuple):
    """Options for a single build-task invocation, parsed from the command line."""
    task: str  # task name, e.g. "build", "lint", "coverage"
    build_folder: str  # derived via get_build_folder_name()
    build_type: str  # CMake build type, e.g. "Release" or "Debug"
    compiler_name: str  # C compiler executable name, or "default"
    tracing: bool  # configure with CESIUM_OMNI_ENABLE_TRACING=ON
    verbose: bool  # pass --verbose to cmake --build
    kit_debug: bool  # configure with CESIUM_OMNI_USE_NVIDIA_DEBUG_LIBRARIES=ON
    parallel: bool  # build with (cores - 1) parallel jobs
    build_only: bool  # skip the configure step in build()
def c_compiler_to_cpp_compiler(compiler_name: str):
    """Map a C compiler executable name to its C++ counterpart (gcc->g++, clang->clang++).

    Version suffixes are preserved, e.g. "gcc-12" -> "g++-12"; unknown names
    pass through unchanged.
    """
    return compiler_name.replace("gcc", "g++").replace("clang", "clang++")
def get_cmake_configure_command(args: Args):
    """Build the `cmake -B ...` configure command for the requested options.

    On Windows the Ninja Multi-Config generator with MSVC (cl) is always used
    and the compiler selection is ignored.
    """
    cmd = ["cmake", "-B", args.build_folder]

    # Release is the default build type, so no need to pass CMAKE_BUILD_TYPE
    if args.build_type != "Release":
        cmd.extend(("-D", "CMAKE_BUILD_TYPE={}".format(args.build_type)))

    if args.tracing:
        cmd.extend(("-D", "CESIUM_OMNI_ENABLE_TRACING=ON"))

    if args.kit_debug:
        cmd.extend(("-D", "CESIUM_OMNI_USE_NVIDIA_DEBUG_LIBRARIES=ON"))

    if is_windows():
        cmd.extend(("-G", "Ninja Multi-Config", "-D", "CMAKE_C_COMPILER=cl", "-D", "CMAKE_CXX_COMPILER=cl"))
        return cmd

    if args.compiler_name == "default":
        return cmd

    # Explicit compiler selection (Linux): derive the C++ compiler from the C one.
    c_compiler = args.compiler_name
    cpp_compiler = c_compiler_to_cpp_compiler(args.compiler_name)
    cmd.extend(("-D", "CMAKE_C_COMPILER={}".format(c_compiler)))
    cmd.extend(("-D", "CMAKE_CXX_COMPILER={}".format(cpp_compiler)))
    return cmd
def get_cmake_build_command(args: Args, target: str):
    """Build the `cmake --build` command; ``target`` may be None for the default target."""
    cmd = ["cmake", "--build", args.build_folder]

    # Multi-config generators (Windows) select the configuration at build time.
    if is_windows():
        cmd.extend(("--config", args.build_type))

    if target:
        cmd.extend(("--target", target))

    if args.verbose:
        cmd.append("--verbose")

    if args.parallel:
        # use every core except one so that computer doesn't go too slow
        cores = max(1, multiprocessing.cpu_count() - 1)
        cmd.extend(("--parallel", str(cores)))

    return cmd
def get_cmake_install_command(args: Args):
    """Build the `cmake --install` command (per-config on Windows)."""
    cmd = ["cmake", "--install", args.build_folder]

    if is_windows():
        cmd.extend(("--config", args.build_type))

    return cmd
def configure(args: Args):
    """Run only the CMake configure step."""
    configure_cmd = get_cmake_configure_command(args)
    process(configure_cmd)
def build(args: Args):
    """Configure (unless --build-only was given), build, then install."""
    build_cmd = get_cmake_build_command(args, None)
    install_kit_cmd = get_cmake_install_command(args)
    if not args.build_only:
        configure_cmd = get_cmake_configure_command(args)
        process(configure_cmd)
    process(build_cmd)
    process(install_kit_cmd)
def coverage(args: Args):
    """Configure, build the generate-coverage target, and open the HTML report.

    Not supported on Windows (prints a message and returns).
    """
    if is_windows():
        print("Coverage is not supported for Windows")
        return
    configure_cmd = get_cmake_configure_command(args)
    build_cmd = get_cmake_build_command(args, "generate-coverage")
    html = "{}/coverage/index.html".format(args.build_folder)
    process(configure_cmd)
    process(build_cmd)
    open_browser(html)
def documentation(args: Args):
    """Configure, build the generate-documentation target, and open the HTML docs."""
    configure_cmd = get_cmake_configure_command(args)
    documentation_cmd = get_cmake_build_command(args, "generate-documentation")
    html = "{}/docs/html/index.html".format(args.build_folder)
    process(configure_cmd)
    process(documentation_cmd)
    open_browser(html)
def install(args: Args):
    """Configure, then build the install target."""
    configure_cmd = get_cmake_configure_command(args)
    install_cmd = get_cmake_build_command(args, "install")
    process(configure_cmd)
    process(install_cmd)
def clean(args: Args):
    """Delete the build folder entirely; a no-op when it does not exist."""
    build_path = args.build_folder
    if os.path.isdir(build_path):
        shutil.rmtree(build_path)
def format(args: Args):
    """Build the clang-format-fix-all target, reformatting all sources in place."""
    format_cmd = get_cmake_build_command(args, "clang-format-fix-all")
    process(format_cmd)
def lint(args: Args):
    """Build the clang-tidy target (check only)."""
    clang_tidy_cmd = get_cmake_build_command(args, "clang-tidy")
    process(clang_tidy_cmd)
def lint_fix(args: Args):
    """Build the clang-tidy-fix target, applying fixes in place."""
    clang_tidy_cmd = get_cmake_build_command(args, "clang-tidy-fix")
    process(clang_tidy_cmd)
def dependency_graph(args: Args):
    """Configure, generate the Conan dependency graph as HTML, and open it."""
    configure_cmd = get_cmake_configure_command(args)
    conan_packages_path = os.path.join(args.build_folder, "Conan_Packages")
    dependency_html = os.path.join(args.build_folder, "dependency_graph.html")
    dependency_cmd = ["conan", "info", args.build_folder, "-if", conan_packages_path, "--graph", dependency_html]
    process(configure_cmd)
    process(dependency_cmd)
    open_browser(dependency_html)
def get_build_folder_name(build_type: str, compiler_name: str):
    """Return the build folder name.

    On Windows it is always "build"; on Linux a non-Release type and/or a
    non-default compiler are appended as suffixes, e.g. "build-debug-clang".
    """
    if is_windows():
        return "build"

    parts = ["build"]
    if build_type != "Release":
        parts.append(build_type.lower())
    if compiler_name != "default":
        parts.append(compiler_name)
    return "-".join(parts)
def get_bin_folder_name(build_type: str, compiler_name: str):
    """Return the binaries folder inside the build folder (per-config on Windows)."""
    build_folder_name = get_build_folder_name(build_type, compiler_name)
    if is_windows():
        # Multi-config generator: binaries live in a per-configuration subfolder.
        return "{}/bin/{}".format(build_folder_name, build_type)
    return "{}/bin".format(build_folder_name)
def main(av: List[str]):
    """Parse the positional CLI arguments and dispatch the requested task.

    av layout: [task, build_type?, compiler_name?, flag?, parallel-flag?], where
    flag is one of --tracing/--verbose/--kit-debug/--build-only (mutually
    exclusive, since only av[3] is inspected) and av[4] may be --no-parallel.

    :raises ValueError: if the task name is unknown (the __main__ wrapper prints
        the error and exits with status 1).
    """
    print(av)
    task = av[0]
    build_type = av[1] if len(av) >= 2 else "Release"
    compiler_name = av[2] if len(av) >= 3 else "default"
    build_folder = get_build_folder_name(build_type, compiler_name)
    tracing = True if len(av) >= 4 and av[3] == "--tracing" else False
    verbose = True if len(av) >= 4 and av[3] == "--verbose" else False
    kit_debug = True if len(av) >= 4 and av[3] == "--kit-debug" else False
    parallel = False if len(av) >= 5 and av[4] == "--no-parallel" else True
    build_only = True if len(av) >= 4 and av[3] == "--build-only" else False

    args = Args(task, build_folder, build_type, compiler_name, tracing, verbose, kit_debug, parallel, build_only)

    tasks = {
        "configure": configure,
        "build": build,
        "clean": clean,
        "coverage": coverage,
        "documentation": documentation,
        "install": install,
        "format": format,
        "lint": lint,
        "lint-fix": lint_fix,
        "dependency-graph": dependency_graph,
    }

    if task not in tasks:
        # BUG FIX: an unknown task used to be silently ignored; fail loudly instead.
        raise ValueError("Unknown task: {}".format(task))
    tasks[task](args)
# Script entry point: forward the CLI arguments (minus the program name) to
# main(); any failure is printed and mapped to a non-zero exit status.
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except Exception as e:
        print(e)
        exit(1)
| 7,274 | Python | 25.845018 | 113 | 0.63129 |
CesiumGS/cesium-omniverse/scripts/clang_tidy.py | #!/usr/bin/env python3
import argparse
import sys
import shutil
from utils import utils
from typing import List
def parse_args(av: List[str]):
    """Split ``av`` into the options we understand and pass-through clang-tidy arguments.

    :return: (known_args, remaining_args) as produced by parse_known_args.
    """
    parser = argparse.ArgumentParser(description="Run / check clang-tidy on staged cpp files.")
    parser.add_argument(
        "--clang-tidy-executable", help="Specific clang-tidy binary to use.", action="store", required=False
    )
    known, remaining = parser.parse_known_args(av)
    return known, remaining
def main(av: List[str]):
    """Run clang-tidy over the staged .cpp files, forwarding unknown arguments to it.

    Prints a skip message and does nothing when no cpp files are staged.

    :raises RuntimeError: if no clang-tidy executable can be found.
    """
    known_args, clang_tidy_args = parse_args(av)

    clang_tidy_executable = known_args.clang_tidy_executable
    if not clang_tidy_executable:
        clang_tidy_executable = shutil.which("clang-tidy")
    # BUG FIX: a missing executable used to produce a confusing subprocess failure
    # (the command started with None); fail fast with a clear message instead,
    # matching the behavior of the clang_format script.
    if not clang_tidy_executable:
        raise RuntimeError("Could not find clang-tidy in system path")

    # NOTE: the original resolved the project root twice; once is enough.
    project_root = utils.get_project_root()
    candidate_files = [
        f.as_posix() for f in utils.get_staged_git_files(project_root) if f.suffix in utils.CPP_EXTENSIONS
    ]

    cmd = [clang_tidy_executable] + clang_tidy_args + candidate_files

    if len(candidate_files) > 0:
        print("Running clang-tidy")
        utils.run_command_and_echo_on_error(cmd)
    else:
        print("Skipping clang-tidy (no cpp files staged)")
if __name__ == "__main__":
main(sys.argv[1:])
| 1,188 | Python | 27.999999 | 108 | 0.672559 |
CesiumGS/cesium-omniverse/scripts/update_certs.py | #! /usr/bin/python3
"""
Intended to be called in the
This script updates the certificates used for any requests that use the core
Context class. While some certs are available on the system, they may not be
consistent or updated. This ensures all certs are uniform and up to date
see: https://github.com/CesiumGS/cesium-omniverse/issues/306
"""
import requests
import sys
import os
def main():
    """Download the latest CA bundle from curl.se and write it to argv[1].

    :return: 0 on success, -1 on bad usage or download failure.
    """
    # --- establish source/destination for certs ---
    if len(sys.argv) < 2:
        print("must provide a filepath for the updated certs")
        return -1

    CERT_URL = "https://curl.se/ca/cacert.pem"
    CERT_FILE_PATH = sys.argv[1]

    # --- ensure directory structure exists ----
    # BUG FIX: a bare filename has an empty dirname, and os.makedirs("") raises;
    # only create the directory when there is one.
    cert_dir = os.path.dirname(CERT_FILE_PATH)
    if cert_dir:
        os.makedirs(cert_dir, exist_ok=True)

    # --- fetch and write the cert file ----
    # BUG FIX: a timeout prevents the build step from hanging forever when the
    # host is unreachable; connection errors are reported instead of tracebacks.
    try:
        req = requests.get(CERT_URL, timeout=60)
    except requests.RequestException as e:
        print(f"failed to fetch certificates from {CERT_URL}: {e}")
        return -1

    if req.status_code != 200:
        print(f"failed to fetch certificates from {CERT_URL}")
        return -1

    # explicit encoding is required for windows
    with open(CERT_FILE_PATH, "w", encoding="utf-8") as f:
        f.write(req.text)

    return 0
if __name__ == "__main__":
sys.exit(main())
| 1,144 | Python | 24.444444 | 76 | 0.659091 |
CesiumGS/cesium-omniverse/scripts/generate_third_party_license_json.py | #!/usr/bin/env python3
import json
import os
import shlex
import subprocess
import argparse
from typing import List
import sys
from pathlib import Path
def main(argv: List[str]):
    """Generate ThirdParty.json by combining `conan info` output with ThirdParty.extra.json.

    :param argv: Command line arguments (see parse_args).
    :raises RuntimeError: if the conan command fails or an override entry has no match.
    """
    args = parse_args(argv)
    project_dir = args.project_dir
    build_dir = args.build_dir
    libraries_to_skip = args.skip.split(',')

    cmd = "conan info {} -if {} -j".format(build_dir,
                                           os.path.join(build_dir, 'Conan_Packages'))
    cmd = shlex.split(cmd, posix=(os.name == 'posix'))
    try:
        output = subprocess.check_output(cmd).decode('utf-8')
        # `conan info -j` prints a status line first; the JSON is on the second line.
        json_output = output.split(os.linesep, 2)[1]
        third_party_json = json.loads(json_output)
    except subprocess.CalledProcessError as error:
        cmd_string = ' '.join(error.cmd)
        raise RuntimeError('Conan command \'{}\' failed with error {}. Third party JSON creation aborted.'
                           .format(cmd_string, error.returncode))

    third_party_json = generate_conan_third_party_json(
        third_party_json, libraries_to_skip)

    third_party_extra_json = json.loads(Path(project_dir).joinpath(
        'ThirdParty.extra.json').read_text())

    # Handle ThirdParty.extra.json: entries marked "override" replace (merge onto)
    # the matching conan entry; all others are appended as-is.
    for element in third_party_extra_json:
        if 'override' in element:
            found_match = False
            for match in third_party_json:
                if match['name'] == element['name']:
                    found_match = True
                    break

            if found_match:
                del element['override']
                third_party_json.remove(match)
                combined = {**match, **element}
                third_party_json.append(combined)
            else:
                # BUG FIX: `element` is a dict, so `element.name` raised AttributeError
                # instead of producing the intended message; use the 'name' key.
                raise RuntimeError('Could not find library to override: \'{}\'. Third party JSON creation aborted.'
                                   .format(element['name']))
        else:
            third_party_json.append(element)

    third_party_json.sort(key=lambda obj: obj['name'].lower())

    third_party_json_path = os.path.join(project_dir, 'ThirdParty.json')

    with open(third_party_json_path, 'w', newline='\n') as json_file:
        json.dump(third_party_json, json_file, indent=4)
        json_file.write('\n')
def parse_args(argv: List[str]):
    """Parse the command line arguments for the license-JSON generator.

    :param argv: Arguments to parse (excluding the program name).
    :return: The parsed argparse Namespace.
    """
    parser = argparse.ArgumentParser(
        description='Create third party license json from Conan info and ThirdParty.extra.json.'
    )
    parser.add_argument('--project-dir',
                        help='The project directory.',
                        action='store',
                        # BUG FIX: `required` expects a bool; the string 'true' only
                        # worked because any non-empty string is truthy.
                        required=True
                        )
    parser.add_argument('--build-dir',
                        help='The CMake build directory. From CMake variable PROJECT_BINARY_DIR.',
                        action='store',
                        required=True
                        )
    parser.add_argument('--skip',
                        help='Comma separated list of libraries to skip.',
                        action='store',
                        )
    return parser.parse_args(argv)
def generate_conan_third_party_json(third_party_json, libraries_to_skip):
    """Convert `conan info -j` output into ThirdParty.json entries.

    Skips the consuming conanfile itself and any library named in
    ``libraries_to_skip``; comma-separated license strings are split into
    individual license entries.
    """
    entries = []
    for library in third_party_json:
        # The `conanfile.txt` object describes this project, not a dependency.
        if library['reference'] == 'conanfile.txt':
            continue

        licenses = []
        for license_entry in library['license']:
            licenses.extend(license_entry.split(', '))

        # display_name is "<name>/<version>".
        pieces = library['display_name'].split('/')
        name = pieces[0]
        version = pieces[1]

        # Libraries that aren't included in the executable are excluded.
        if name in libraries_to_skip:
            continue

        entries.append({
            'name': name,
            'license': licenses,
            'version': version,
            'url': library['homepage'],
        })

    return entries
if __name__ == '__main__':
main(sys.argv[1:])
| 4,043 | Python | 32.421487 | 115 | 0.558001 |
CesiumGS/cesium-omniverse/scripts/clang_format.py | #!/usr/bin/env python3
import argparse
import sys
import subprocess
import shutil
import shlex
from utils import utils
from pathlib import Path
from typing import List
def clang_format_on_path(clang_format_binary: str, absolute_path: Path) -> str:
    """Run clang-format on a file and return the formatted text (file unchanged).

    :param clang_format_binary: Path to the clang-format executable.
    :param absolute_path: The file to format.
    :return: The formatted source, decoded as UTF-8 (undecodable bytes replaced).
    """
    # BUG FIX: the old code built a shell-style string via shlex.quote and re-split
    # it with shlex.split; both are POSIX-only and mangle Windows paths containing
    # backslashes or spaces. Passing the argument vector directly avoids quoting.
    cmd = [clang_format_binary, "-style=file", str(absolute_path)]
    result = subprocess.check_output(cmd)
    return result.decode("utf-8", "replace")
def clang_format_in_place(clang_format_binary: str, absolute_path: Path):
    """Reformat a file on disk with clang-format (destructive, -i)."""
    # Direct argv list — same process invocation as quoting + shlex.split.
    argv = [clang_format_binary, "-style=file", "-i", str(absolute_path)]
    subprocess.check_output(argv)
def parse_args(av: List[str]):
    """Build and apply the CLI parser for the clang-format runner."""
    p = argparse.ArgumentParser(description="Run / check clang-formatting.")
    p.add_argument(
        "--clang-format-executable",
        help="Specific clang-format binary to use.",
        action="store",
        required=False,
    )
    p.add_argument(
        "--source-directories",
        help='Directories (relative to project root) to recursively scan for cpp files (e.g "src", "include"...)',
        nargs="+",
        required=True,
    )
    # Exactly one of --fix / --check must be given.
    mode = p.add_mutually_exclusive_group(required=True)
    mode.add_argument(
        "--fix", help="Apply clang-format formatting to source in-place (destructive)", action="store_true"
    )
    mode.add_argument("--check", help="Check if source matches clang-format rules", action="store_true")
    # Exactly one of --all / --staged must be given.
    scope = p.add_mutually_exclusive_group(required=True)
    scope.add_argument("--all", help="Process all valid source files.", action="store_true")
    scope.add_argument("--staged", help="Process only staged source files.", action="store_true")
    return p.parse_args(av)
def main(av: List[str]):
    """Entry point: run clang-format in --fix or --check mode over the selected files.

    Exits 0 on success. In check mode the exit code is the number of files
    whose current content differs from clang-format's output.
    """
    if not shutil.which("git"):
        raise RuntimeError("Could not find git in path")
    project_root_directory = utils.get_project_root()
    args = parse_args(av)
    # Use user provided clang_format binary if provided
    clang_format_binary = args.clang_format_executable
    if clang_format_binary:
        clang_format_binary = shutil.which(clang_format_binary)
    # NOTE(review): if the user-supplied binary is not found on PATH, this
    # silently falls back to the system clang-format — confirm that is intended.
    if not clang_format_binary:
        clang_format_binary = shutil.which("clang-format")
    if not clang_format_binary:
        raise RuntimeError("Could not find clang-format in system path")
    mode = "all" if args.all else "staged"
    source_directories = args.source_directories
    # Generate list of source_files to check / fix.
    source_files: List[utils.SourceFile] = utils.get_source_files(source_directories, args.all)
    failed_files: List[utils.FailedFile] = []
    # Fix or check formatting for each file
    for src in source_files:
        absolute_path = project_root_directory.joinpath(src.relative_path)
        if args.check:
            # Staged files are compared against the git index content.
            # NOTE(review): clang_format_on_path formats the *working-tree*
            # file, so a staged file with unstaged edits diffs against a
            # different baseline than what gets formatted — verify.
            old_text = (
                absolute_path.read_text(encoding="utf-8")
                if not src.staged
                else utils.get_staged_file_text(src.relative_path)
            )
            new_text = clang_format_on_path(clang_format_binary, absolute_path)
            diff = utils.unidiff_output(old_text, new_text)
            if diff != "":
                failed_files.append(utils.FailedFile(src.relative_path, diff))
        else:
            clang_format_in_place(clang_format_binary, absolute_path)
    if len(source_files) == 0:
        print("clang-format ({} files): No files found, nothing to do.".format(mode))
        sys.exit(0)
    if args.fix:
        print("Ran clang-format -style=file -i on {} files".format(mode))
        sys.exit(0)
    if len(failed_files) == 0:
        print("clang-format ({} files) passes.".format(mode))
        sys.exit(0)
    # Check mode with failures: list every offending file and its diff.
    print("clang-format ({} files) failed on the following files: ".format(mode))
    for failure in failed_files:
        print("{}".format(failure.relative_path))
        print(failure.diff)
    sys.exit(len(failed_files))
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 4,036 | Python | 36.379629 | 114 | 0.655104 |
CesiumGS/cesium-omniverse/scripts/copy_from_dir.py | import sys
from pathlib import Path
from shutil import copy2
# Broken out for formatting reasons, since tabs within HEREDOCs will be output.
usage_message = """Invalid arguments.
Usage: copy_from_dir.py <glob-pattern> <source-dir-path> <destination-dir-path>
Please fix your command and try again.
"""


def main():
    """Copy every file matching a glob pattern from one directory to another.

    Expects three CLI arguments: glob pattern, source directory and
    destination directory (see ``usage_message``).

    Returns:
        0 on success, 1 on bad arguments (used as the process exit code).
    """
    if len(sys.argv) < 4:
        print(usage_message)
        return 1

    glob_pattern: str = sys.argv[1]
    source_dir = Path(sys.argv[2]).resolve()
    dest_dir = Path(sys.argv[3]).resolve()

    print(f'Performing file copy with glob pattern "{glob_pattern}"')
    print(f"\tSource: {source_dir}")
    print(f"\tDestination: {dest_dir}\n")

    for source_path in source_dir.glob(glob_pattern):
        # Fix 1: Path.glob already yields paths rooted at source_dir; the old
        # `source_dir / f` re-join was a no-op for the absolute results.
        # Fix 2: skip directories — copy2() raises on them, so a pattern that
        # happened to match a subdirectory used to crash the whole copy.
        if not source_path.is_file():
            continue
        copy2(source_path, dest_dir, follow_symlinks=True)
        print(f"Copied {source_path}")

    return 0
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
| 948 | Python | 22.724999 | 83 | 0.647679 |
CesiumGS/cesium-omniverse/scripts/utils/utils.py | #!/usr/bin/env python3
import subprocess
import shlex
import os
import glob
import sys
from pathlib import Path
from typing import List, NamedTuple, Set
import difflib
CPP_EXTENSIONS = [".cpp", ".h", ".cxx", ".hxx", ".hpp", ".cc", ".inl"]
def get_project_root() -> Path:
    """Return the repository root as reported by git.

    Raises:
        RuntimeError: when not executed inside a git repository.
    """
    try:
        toplevel = subprocess.check_output(shlex.split('git rev-parse --show-toplevel'))
    except subprocess.CalledProcessError:
        raise RuntimeError('command must be ran inside .git repo')
    return Path(toplevel.strip().decode('utf-8', 'replace'))
def get_staged_git_files(project_root: Path) -> List[Path]:
    """Return absolute paths (under project_root) of files staged in git.

    Only added/copied/modified/renamed/type-changed entries are listed
    (diff-filter=ACMRT), i.e. deletions are excluded.
    """
    raw = subprocess.check_output(shlex.split("git diff --cached --name-only --diff-filter=ACMRT"))
    return [project_root / line for line in raw.decode('utf-8').splitlines()]
def get_cmake_build_directory(project_root: Path):
    """Locate the CMake build directory (the one containing CMakeCache.txt).

    Raises:
        RuntimeError: when no CMakeCache.txt exists anywhere under project_root.
    """
    pattern = project_root.joinpath("**/CMakeCache.txt").as_posix()
    matches = glob.glob(pattern, recursive=True)
    if not matches:
        raise RuntimeError(
            "Could not find CMakeCache.txt in {}. Generate CMake configuration first.".format(project_root))
    # The cache file's parent directory is the build directory.
    return os.path.realpath(os.path.join(project_root, matches[0], ".."))
def run_cmake_target(cmake_build_directory, target):
    """Build a single CMake target inside the given build directory."""
    quoted_dir = shlex.quote(cmake_build_directory)
    argv = shlex.split("cmake --build {} --target {}".format(quoted_dir, target))
    run_command_and_echo_on_error(argv)
def run_command_and_echo_on_error(cmd: List[str]):
    """Run cmd quietly; on failure, print its combined output and exit(1).

    stdout and stderr are captured together so a failing command's full
    output can be echoed in order.
    """
    completed = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if completed.returncode != 0:
        print("Command \"{}\" failed:".format(' '.join(cmd)))
        print(completed.stdout.decode('utf-8'))
        sys.exit(1)
class SourceFile(NamedTuple):
    """A candidate source file: its repo-root-relative path plus staged flag."""
    relative_path: Path
    # True when the file is currently staged in git's index.
    staged: bool
class FailedFile(NamedTuple):
    """A file that failed the format check, with its unified diff."""
    relative_path: Path
    diff: str
def get_source_files(source_directories: List[str], modeIsAll: bool) -> List[SourceFile]:
    """Collect C++ source files under the given repo-relative directories.

    When modeIsAll is False, only files currently staged in git are kept.
    """
    project_root = get_project_root()
    staged_paths = get_staged_rel_paths()
    collected = []
    for directory in source_directories:
        for extension in CPP_EXTENSIONS:
            pattern = os.path.join(project_root, directory, "**/*" + extension)
            for match in glob.glob(pattern, recursive=True):
                relative = Path(match).relative_to(project_root)
                collected.append(SourceFile(relative, relative in staged_paths))
    # Keep everything in "all" mode; otherwise only the staged files.
    return [f for f in collected if f.staged or modeIsAll]
def get_staged_rel_paths() -> Set[str]:
    """Return the set of staged file paths (as Path objects) reported by git."""
    raw = subprocess.check_output(shlex.split("git diff --cached --name-only --diff-filter=ACMRT"))
    decoded = raw.decode('utf-8', 'replace')
    return {Path(line) for line in decoded.splitlines()}
def get_staged_file_text(relative_path: Path) -> str:
    """Return the staged (index) content of a file via `git show :<path>`."""
    argv = shlex.split("git show :{}".format(shlex.quote(str(relative_path.as_posix()))))
    return subprocess.check_output(argv).decode('utf-8', 'replace')
# Whether colored diff output is available; flipped below if colorama imports.
COLOR_SUPPORT = False
try:
    # colorama is an optional third-party dependency; without it, diffs print plain.
    import colorama
    colorama.init()
    COLOR_SUPPORT = True
    def color_diff(diff):
        """Yield unified-diff lines wrapped in ANSI colors (+ green, - red, ^ blue)."""
        for line in diff:
            if line.startswith('+'):
                yield colorama.Fore.GREEN + line + colorama.Fore.RESET
            elif line.startswith('-'):
                yield colorama.Fore.RED + line + colorama.Fore.RESET
            elif line.startswith('^'):
                yield colorama.Fore.BLUE + line + colorama.Fore.RESET
            else:
                yield line
except ImportError:
    pass
def unidiff_output(expected: str, actual: str) -> str:
    """Return a unified diff between two strings ('' when they are identical).

    Output lines are colorized when colorama is available (COLOR_SUPPORT).
    """
    # Idiom fix: name the keepends flag instead of passing a bare `1`, and
    # avoid rebinding the parameters to values of a different type.
    expected_lines = expected.splitlines(keepends=True)
    actual_lines = actual.splitlines(keepends=True)
    diff = difflib.unified_diff(expected_lines, actual_lines)
    if COLOR_SUPPORT:
        diff = color_diff(diff)
    return ''.join(diff)
| 4,139 | Python | 31.857143 | 96 | 0.639526 |
CesiumGS/cesium-omniverse/include/cesium/omniverse/CesiumOmniverse.h | #pragma once
#include "cesium/omniverse/AssetTroubleshootingDetails.h"
#include "cesium/omniverse/RenderStatistics.h"
#include "cesium/omniverse/SetDefaultTokenResult.h"
#include "cesium/omniverse/TokenTroubleshootingDetails.h"
#include <carb/Interface.h>
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>
namespace cesium::omniverse {
class CesiumIonSession;
// Plain-old-data description of a viewport passed across the plugin boundary.
struct ViewportApi {
    // 4x4 view and projection matrices flattened to 16 doubles.
    // NOTE(review): element ordering (row- vs column-major) is not evident
    // from this header — confirm against the consumer before relying on it.
    double viewMatrix[16]; // NOLINT(modernize-avoid-c-arrays)
    double projMatrix[16]; // NOLINT(modernize-avoid-c-arrays)
    // Viewport dimensions (presumably in pixels — confirm).
    double width;
    double height;
};
class ICesiumOmniverseInterface {
public:
    CARB_PLUGIN_INTERFACE("cesium::omniverse::ICesiumOmniverseInterface", 0, 0);
    /**
     * @brief Call this on extension startup.
     *
     * @param cesiumExtensionLocation Path to the Cesium Omniverse extension location.
     */
    virtual void onStartup(const char* cesiumExtensionLocation) noexcept = 0;
    /**
     * @brief Call this on extension shutdown.
     */
    virtual void onShutdown() noexcept = 0;
    /**
     * @brief Reloads a tileset.
     *
     * @param tilesetPath The tileset sdf path. If there's no tileset with this path nothing happens.
     */
    virtual void reloadTileset(const char* tilesetPath) noexcept = 0;
    /**
     * @brief Updates all tilesets this frame.
     *
     * @param viewports The viewports.
     * @param count The number of viewports.
     * @param waitForLoadingTiles Whether to wait for all tiles to load before continuing.
     */
    virtual void onUpdateFrame(const ViewportApi* viewports, uint64_t count, bool waitForLoadingTiles) noexcept = 0;
    /**
     * @brief Updates the reference to the USD stage for the C++ layer.
     *
     * @param stageId The id of the current stage.
     */
    virtual void onUsdStageChanged(long stageId) noexcept = 0;
    /**
     * @brief Connects to Cesium ion.
     */
    virtual void connectToIon() noexcept = 0;
    /**
     * @brief Gets the active Cesium ion session.
     */
    virtual std::optional<std::shared_ptr<CesiumIonSession>> getSession() noexcept = 0;
    /**
     * @brief Get the path of the active Cesium ion server.
     */
    virtual std::string getServerPath() noexcept = 0;
    /**
     * @brief Gets all Cesium ion sessions.
     */
    virtual std::vector<std::shared_ptr<CesiumIonSession>> getSessions() noexcept = 0;
    /**
     * @brief Get all Cesium ion server paths.
     */
    virtual std::vector<std::string> getServerPaths() noexcept = 0;
    /**
     * @brief Gets the last result with code and message of setting the default token.
     *
     * @return A struct with a code and message. 0 is successful.
     */
    virtual SetDefaultTokenResult getSetDefaultTokenResult() noexcept = 0;
    /**
     * @brief Boolean to check if the default token is set.
     *
     * @return True if the default token is set.
     */
    virtual bool isDefaultTokenSet() noexcept = 0;
    /**
     * @brief Creates a new token using the specified name.
     *
     * @param name The name for the new token.
     */
    virtual void createToken(const char* name) noexcept = 0;
    /**
     * @brief Selects an existing token associated with the logged in account.
     *
     * @param id The ID of the selected token.
     */
    virtual void selectToken(const char* id, const char* token) noexcept = 0;
    /**
     * @brief Used for the specify token action by the set project default token window.
     *
     * @param token The desired token.
     */
    virtual void specifyToken(const char* token) noexcept = 0;
    /**
     * @brief Gets troubleshooting details for the last troubleshot asset, if any.
     */
    virtual std::optional<AssetTroubleshootingDetails> getAssetTroubleshootingDetails() noexcept = 0;
    /**
     * @brief Gets token troubleshooting details for the troubleshot asset, if any.
     */
    virtual std::optional<TokenTroubleshootingDetails> getAssetTokenTroubleshootingDetails() noexcept = 0;
    /**
     * @brief Gets troubleshooting details for the project default token, if any.
     */
    virtual std::optional<TokenTroubleshootingDetails> getDefaultTokenTroubleshootingDetails() noexcept = 0;
    /**
     * @brief Updates troubleshooting details for a tileset.
     *
     * The event ids presumably correlate the result with the originating UI
     * request — confirm against the UI layer.
     */
    virtual void updateTroubleshootingDetails(
        const char* tilesetPath,
        int64_t tilesetIonAssetId,
        uint64_t tokenEventId,
        uint64_t assetEventId) noexcept = 0;
    /**
     * @brief Updates troubleshooting details for a raster overlay on a tileset.
     */
    virtual void updateTroubleshootingDetails(
        const char* tilesetPath,
        int64_t tilesetIonAssetId,
        int64_t rasterOverlayIonAssetId,
        uint64_t tokenEventId,
        uint64_t assetEventId) noexcept = 0;
    /**
     * @brief Prints the Fabric stage. For debugging only.
     *
     * @returns A string representation of the Fabric stage.
     */
    virtual std::string printFabricStage() noexcept = 0;
    /**
     * @brief Get render statistics. For debugging only.
     *
     * @returns Object containing render statistics.
     */
    virtual RenderStatistics getRenderStatistics() noexcept = 0;
    /**
     * @brief Whether any attribution credits are currently available.
     */
    virtual bool creditsAvailable() noexcept = 0;
    /**
     * @brief Gets the current credits as (text, flag) pairs.
     *
     * NOTE(review): the bool's meaning (likely "show on screen") is not
     * evident from this header — confirm at the call site.
     */
    virtual std::vector<std::pair<std::string, bool>> getCredits() noexcept = 0;
    /**
     * @brief Signals the start of a new frame for credit collection.
     */
    virtual void creditsStartNextFrame() noexcept = 0;
    /**
     * @brief Whether performance tracing is enabled.
     */
    virtual bool isTracingEnabled() noexcept = 0;
    /**
     * @brief Clear the asset accessor cache.
     */
    virtual void clearAccessorCache() = 0;
};
} // namespace cesium::omniverse
| 5,113 | C | 29.082353 | 116 | 0.672599 |
CesiumGS/cesium-omniverse/tests/src/ExampleTests.cpp | /*
* A collection of simple tests to demonstrate Doctest
*/
#include "testUtils.h"
#include <doctest/doctest.h>
#include <array>
#include <cstdint>
#include <iostream>
#include <list>
#include <stdexcept>
#include <vector>
#include <yaml-cpp/yaml.h>
const std::string CONFIG_PATH = "tests/configs/exampleConfig.yaml";
// Test Suites are not required, but this sort of grouping makes it possible
// to select which tests do/don't run via command line options
// Demonstration suite: shows doctest features (subcases, parameterization,
// YAML-driven scenarios) for contributors writing new tests.
TEST_SUITE("Example Tests") {
    // ----------------------------------------------
    // Basic Tests
    // ----------------------------------------------
    TEST_CASE("The most basic test") {
        CHECK(1 + 1 == 2);
    }
    TEST_CASE("Demonstrating Subcases") {
        // This initialization is shared between all subcases
        int x = 1;
        // Note that these two subcases run independently of each other!
        SUBCASE("Increment") {
            x += 1;
            CHECK(x == 2);
        }
        SUBCASE("Decrement") {
            x -= 1;
            CHECK(x == 0);
        }
        // Flow returns here after each independent subcase, so we can test
        // shared effects here
        CHECK(x != 1);
    }
    // A few notes on subcases:
    // - You can nest subcases
    // - Subcases work by creating multiple calls to the higher level case,
    // where each call proceeds to only one of the subcases. If you generate
    // excessive subcases, watch out for a stack overflow.
    void runPositiveCheck(int64_t val) {
        // helper function for parameterized test method 1
        CHECK(val > 0);
    }
    TEST_CASE("Demonstrate Parameterized Tests - method 1") {
        // Generate the data you want the tests to iterate over
        std::list<uint32_t> dataContainer = {42, 64, 8675309, 1024};
        for (auto i : dataContainer) {
            // CAPTURE logs the value of i alongside any failure report.
            CAPTURE(i);
            runPositiveCheck(i);
        }
    }
    TEST_CASE("Demonstrate Parameterized Tests - method 2") {
        // Generate the data you want the tests to iterate over
        uint32_t item;
        std::list<uint32_t> dataContainer = {42, 64, 8675309, 1024};
        // This macro from doctestUtils.h will generate a subcase per datum
        DOCTEST_VALUE_PARAMETERIZED_DATA(item, dataContainer);
        // this check will now be run for each datum
        CHECK(item > 0);
    }
    // ----------------------------------------------
    // YAML Config Examples
    // ----------------------------------------------
    std::string transmogrifier(const std::string& s) {
        // an example function with differing output for some scenarios
        if (s == "scenario2") {
            return "bar";
        }
        return "foo";
    }
    void checkAgainstExpectedResults(const std::string& scenarioName, const YAML::Node& expectedResults) {
        // we have to specify the type of the desired data from the config via as()
        CHECK(3.14159 == expectedResults["pi"].as<double>());
        CHECK(2 == expectedResults["onlyEvenPrime"].as<int>());
        // as() does work for some non-scalar types, such as vectors, lists, and maps
        // for adding custom types to the config, see:
        // https://github.com/jbeder/yaml-cpp/wiki/Tutorial#converting-tofrom-native-data-types
        const auto fib = expectedResults["fibonacciSeq"].as<std::vector<int>>();
        CHECK(fib[2] + fib[3] == fib[4]);
        // More complicated checks can be done with helper functions that take the scenario as input
        CHECK(transmogrifier(scenarioName) == expectedResults["transmogrifierOutput"].as<std::string>());
    }
    TEST_CASE("Use a config file to detail multiple scenarios") {
        YAML::Node configRoot = YAML::LoadFile(CONFIG_PATH);
        // The config file has default parameters and
        // an override for one or more scenarios
        std::vector<std::string> scenarios = {"scenario1", "scenario2", "scenario3"};
        for (const auto& s : scenarios) {
            ConfigMap conf = getScenarioConfig(s, configRoot);
            checkAgainstExpectedResults(s, conf);
        }
    }
    // ----------------------------------------------
    // Misc.
    // ----------------------------------------------
    TEST_CASE("A few other useful macros") {
        // The most common test macro is CHECK, but others are available
        // Here are just a few
        // Any failures here will prevent the rest of the test from running
        REQUIRE(0 == 0);
        // Make sure the enclosed code does/doesn't throw an exception
        CHECK_THROWS(throw "test exception!");
        CHECK_NOTHROW(if (false) throw "should not throw");
        // Prints a warning if the assert fails, but does not fail the test
        WARN(true);
    }
}
| 4,824 | C++ | 32.741259 | 106 | 0.572968 |
CesiumGS/cesium-omniverse/tests/src/GltfTests.cpp | #include "testUtils.h"
#include "cesium/omniverse/FabricMaterialInfo.h"
#include "cesium/omniverse/FabricVertexAttributeAccessors.h"
#include "cesium/omniverse/GltfUtil.h"
#include <CesiumGltf/Material.h>
#include <CesiumGltf/MeshPrimitive.h>
#include <CesiumGltf/Model.h>
#include <CesiumGltfReader/GltfReader.h>
#include <doctest/doctest.h>
#include <cstddef>
#include <cstdio>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>
#include <gsl/span>
#include <yaml-cpp/yaml.h>
using namespace cesium::omniverse;
const std::string ASSET_DIR = "tests/testAssets/gltfs";
const std::string CONFIG_PATH = "tests/configs/gltfConfig.yaml";
// simplifies casting when comparing some material queries to expected output from config
bool operator==(const glm::dvec3& v3, const std::vector<double>& v) {
    // Component-wise equality against a 3-element vector; any other size is unequal.
    if (v.size() != 3) {
        return false;
    }
    return v3[0] == v[0] && v3[1] == v[1] && v3[2] == v[2];
}
TEST_SUITE("Test GltfUtil") {
    // Loads the glTF/glb at gltfFileName and checks GltfUtil's queries against
    // the per-file expected results parsed from the YAML config.
    void checkGltfExpectedResults(const std::filesystem::path& gltfFileName, const YAML::Node& expectedResults) {
        // --- Load Gltf ---
        std::ifstream gltfStream(gltfFileName, std::ifstream::binary);
        gltfStream.seekg(0, std::ios::end);
        auto gltfFileLength = gltfStream.tellg();
        gltfStream.seekg(0, std::ios::beg);
        std::vector<std::byte> gltfBuf(static_cast<uint64_t>(gltfFileLength));
        gltfStream.read((char*)&gltfBuf[0], gltfFileLength);
        CesiumGltfReader::GltfReader reader;
        auto gltf = reader.readGltf(
            gsl::span(reinterpret_cast<const std::byte*>(gltfBuf.data()), static_cast<uint64_t>(gltfFileLength)));
        if (!gltf.errors.empty()) {
            for (const auto& err : gltf.errors) {
                std::cerr << err;
            }
            throw std::runtime_error("failed to parse model");
        }
        // gltf.model is a std::optional<CesiumGltf::Model>, make sure it exists
        if (!(gltf.model && gltf.model->meshes.size() > 0)) {
            throw std::runtime_error("test model is empty");
        }
        // --- Begin checks ---
        // All checks run on the first primitive of the first mesh only.
        const auto& prim = gltf.model->meshes[0].primitives[0];
        const auto& model = *gltf.model;
        CHECK(GltfUtil::hasNormals(model, prim, false) == expectedResults["hasNormals"].as<bool>());
        CHECK(GltfUtil::hasTexcoords(model, prim, 0) == expectedResults["hasTexcoords"].as<bool>());
        CHECK(
            GltfUtil::hasRasterOverlayTexcoords(model, prim, 0) ==
            expectedResults["hasRasterOverlayTexcoords"].as<bool>());
        CHECK(GltfUtil::hasVertexColors(model, prim, 0) == expectedResults["hasVertexColors"].as<bool>());
        CHECK(GltfUtil::hasMaterial(prim) == expectedResults["hasMaterial"].as<bool>());
        // material tests
        if (GltfUtil::hasMaterial(prim)) {
            const auto& matInfo = GltfUtil::getMaterialInfo(model, prim);
            CHECK(matInfo.alphaCutoff == expectedResults["alphaCutoff"].as<double>());
            CHECK(matInfo.alphaMode == static_cast<FabricAlphaMode>(expectedResults["alphaMode"].as<int32_t>()));
            CHECK(matInfo.baseAlpha == expectedResults["baseAlpha"].as<double>());
            CHECK(matInfo.baseColorFactor == expectedResults["baseColorFactor"].as<std::vector<double>>());
            CHECK(matInfo.emissiveFactor == expectedResults["emissiveFactor"].as<std::vector<double>>());
            CHECK(matInfo.metallicFactor == expectedResults["metallicFactor"].as<double>());
            CHECK(matInfo.roughnessFactor == expectedResults["roughnessFactor"].as<double>());
            CHECK(matInfo.doubleSided == expectedResults["doubleSided"].as<bool>());
            CHECK(matInfo.hasVertexColors == expectedResults["hasVertexColors"].as<bool>());
        }
        // Accessor smoke tests
        PositionsAccessor positions;
        IndicesAccessor indices;
        positions = GltfUtil::getPositions(model, prim);
        CHECK(positions.size() > 0);
        indices = GltfUtil::getIndices(model, prim, positions);
        CHECK(indices.size() > 0);
        if (GltfUtil::hasNormals(model, prim, false)) {
            CHECK(GltfUtil::getNormals(model, prim, positions, indices, false).size() > 0);
        }
        if (GltfUtil::hasVertexColors(model, prim, 0)) {
            CHECK(GltfUtil::getVertexColors(model, prim, 0).size() > 0);
        }
        if (GltfUtil::hasTexcoords(model, prim, 0)) {
            CHECK(GltfUtil::getTexcoords(model, prim, 0).size() > 0);
        }
        if (GltfUtil::hasRasterOverlayTexcoords(model, prim, 0)) {
            CHECK(GltfUtil::getRasterOverlayTexcoords(model, prim, 0).size() > 0);
        }
        CHECK(GltfUtil::getExtent(model, prim) != std::nullopt);
    }
    TEST_CASE("Default getter smoke tests") {
        CHECK_NOTHROW(GltfUtil::getDefaultMaterialInfo());
        CHECK_NOTHROW(GltfUtil::getDefaultTextureInfo());
    }
    TEST_CASE("Check helper functions on various models") {
        std::vector<std::string> gltfFiles;
        // get list of gltf test files
        for (auto const& i : std::filesystem::directory_iterator(ASSET_DIR)) {
            std::filesystem::path fname = i.path().filename();
            if (fname.extension() == ".gltf" || fname.extension() == ".glb") {
                gltfFiles.push_back(fname.string());
            }
        }
        // parse test config yaml
        const auto configRoot = YAML::LoadFile(CONFIG_PATH);
        const auto basePath = std::filesystem::path(ASSET_DIR);
        for (auto const& fileName : gltfFiles) {
            // attach filename to any failed checks
            CAPTURE(fileName);
            const auto conf = getScenarioConfig(fileName, configRoot);
            // the / operator concatenates file paths
            checkGltfExpectedResults(basePath / fileName, conf);
        }
    }
}
| 5,896 | C++ | 39.951389 | 114 | 0.631784 |
CesiumGS/cesium-omniverse/tests/src/ObjectPoolTests.cpp | #include "testUtils.h"
#include <cesium/omniverse/ObjectPool.h>
#include <doctest/doctest.h>
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <queue>
constexpr int MAX_TESTED_POOL_SIZE = 1024; // The max size pool to randomly generate
// ObjectPool is a virtual class so we cannot directly instantiate it for
// testing, and instantiating the classes that implement it (FabricGeometryPool,
// FabricMaterialPool, and FabricTexturePool) requires mocking more complicated
// classes, so we create a bare-bones class here.
// Trivial stand-in for pool-managed objects: records its id and whether the
// pool currently considers it active.
class MockObject {
public:
    MockObject(uint64_t objectId)
        : id(objectId)
        , active(false) {}
    uint64_t id;
    bool active;
};
// Concrete ObjectPool over MockObject so the pool's bookkeeping can be tested
// without the Fabric geometry/material/texture dependencies of the real pools.
class MockObjectPool final : public cesium::omniverse::ObjectPool<MockObject> {
protected:
    // Factory hook invoked by the pool when it needs to grow.
    std::shared_ptr<MockObject> createObject(uint64_t objectId) const override {
        return std::make_shared<MockObject>(objectId);
    };
    // Activation hook toggled by the pool on acquire/release.
    void setActive(MockObject* obj, bool active) const override {
        obj->active = active;
    };
};
// Drives a pool through `numEvents` random acquire/release operations (and,
// when `setCap` is true, one mid-run capacity increase), then validates the
// pool's counters against our own independent bookkeeping. Uses rand(), so
// the sequence depends on the global seed.
void testRandomSequenceOfCmds(MockObjectPool& opl, int numEvents, bool setCap) {
    // Track the objects we've acquired so we can release them
    std::queue<std::shared_ptr<MockObject>> activeObjects;
    // The total number of acquires performed, which becomes the minimum
    // expected size of the pool
    auto maxActiveCount = opl.getNumberActive();
    // Perform a random sequence of acquires/releases while
    // ensuring we only release what we've acquired
    for (int i = 0; i < numEvents; ++i) {
        if (!activeObjects.empty() && rand() % 2 == 0) {
            // Release in FIFO order (front of the queue).
            opl.release(activeObjects.front());
            activeObjects.pop();
        } else {
            activeObjects.push(opl.acquire());
        }
        maxActiveCount = std::max(maxActiveCount, activeObjects.size());
        if (setCap && i == numEvents / 2) {
            // At the halfway point, try resetting the capacity
            // Ensure the new size is GTE, avoiding rollover
            uint64_t guaranteedGTE =
                std::max(opl.getCapacity(), opl.getCapacity() + static_cast<uint64_t>(rand() % MAX_TESTED_POOL_SIZE));
            opl.setCapacity(guaranteedGTE);
        }
    }
    auto numActive = activeObjects.size();
    // Ensure our math matches
    CHECK(opl.getNumberActive() == numActive);
    // Make sure there's capacity for all objects
    CHECK(opl.getCapacity() >= numActive + opl.getNumberInactive());
    CHECK(opl.getCapacity() >= maxActiveCount);
    // The percent active is calculated out of the pool's total capacity
    // which must be gte our max observed active count
    float expectedPercentActive;
    if (maxActiveCount != 0) {
        expectedPercentActive = (float)numActive / (float)maxActiveCount;
    } else {
        // Nothing was ever acquired: the pool reports 100% active by convention.
        expectedPercentActive = 1;
    }
    CHECK(opl.computePercentActive() <= expectedPercentActive);
}
// ---- Begin tests ----
// ---- Begin tests ----
TEST_SUITE("Test ObjectPool") {
    TEST_CASE("Test initializiation") {
        MockObjectPool opl = MockObjectPool();
        SUBCASE("Initial capacity") {
            CHECK(opl.getCapacity() == 0);
        }
        SUBCASE("Initial active") {
            CHECK(opl.getNumberActive() == 0);
        }
        SUBCASE("Initial inactive") {
            CHECK(opl.getNumberInactive() == 0);
        }
        SUBCASE("Initial percent active") {
            // Initial percent active is assumed to be 100% in parts of the code
            CHECK(opl.computePercentActive() == 1);
        }
    }
    TEST_CASE("Test acquire/release") {
        MockObjectPool opl = MockObjectPool();
        // Generate a random number of actions to perform
        // (NUM_TEST_REPETITIONS counts, each becoming its own subcase run).
        int numEvents;
        std::list<int> randEventCounts;
        fillWithRandomInts(randEventCounts, 0, MAX_TESTED_POOL_SIZE, NUM_TEST_REPETITIONS);
        SUBCASE("Test repeated acquires") {
            DOCTEST_VALUE_PARAMETERIZED_DATA(numEvents, randEventCounts);
            for (int i = 0; i < numEvents; ++i) {
                opl.acquire();
            }
            CHECK(opl.getNumberActive() == numEvents);
            CHECK(opl.getCapacity() >= numEvents);
        }
        SUBCASE("Test random acquire/release patterns") {
            DOCTEST_VALUE_PARAMETERIZED_DATA(numEvents, randEventCounts);
            testRandomSequenceOfCmds(opl, numEvents, false);
        }
        SUBCASE("Test random setting capacity") {
            DOCTEST_VALUE_PARAMETERIZED_DATA(numEvents, randEventCounts);
            testRandomSequenceOfCmds(opl, numEvents, true);
        }
    }
}
| 4,590 | C++ | 31.560283 | 118 | 0.63573 |
CesiumGS/cesium-omniverse/tests/src/tilesetTests.cpp | #include "tilesetTests.h"
#include "testUtils.h"
#include "cesium/omniverse/AssetRegistry.h"
#include "cesium/omniverse/Context.h"
#include "cesium/omniverse/OmniTileset.h"
#include "cesium/omniverse/UsdUtil.h"
#include <CesiumUsdSchemas/tileset.h>
#include <carb/dictionary/DictionaryUtils.h>
#include <carb/events/IEvents.h>
#include <doctest/doctest.h>
#include <omni/kit/IApp.h>
#include <memory>
// Path of the tileset prim created for the end-to-end test.
pxr::SdfPath endToEndTilesetPath;
// Flipped to true by TilesetLoadListener when any tileset finishes loading.
bool endToEndTilesetLoaded = false;
// Keeps the TILESET_LOADED event subscription alive for the test's duration.
carb::events::ISubscriptionPtr endToEndTilesetSubscriptionPtr;
class TilesetLoadListener;
std::unique_ptr<TilesetLoadListener> tilesetLoadListener;
using namespace cesium::omniverse;
// Minimal IEventListener that flips `endToEndTilesetLoaded` when the
// TILESET_LOADED message arrives on the Kit message bus.
class TilesetLoadListener final : public carb::events::IEventListener {
public:
    uint64_t refCount = 0;
    void onEvent(carb::events::IEvent* e [[maybe_unused]]) override {
        endToEndTilesetLoaded = true;
    };
    // Intrusive ref-counting required by the carb interface. Ownership is
    // held by a unique_ptr, so release() only decrements and never deletes.
    uint64_t addRef() override {
        return ++refCount;
    };
    uint64_t release() override {
        return --refCount;
    };
};
// Fixture setup: subscribes to tileset-load events and creates a tileset prim
// pointing at the local test tileset. TEST_WORKING_DIRECTORY is a
// compile-time string macro supplied by the build.
void setUpTilesetTests(Context* pContext, const pxr::SdfPath& rootPath) {
    // Create a listener for tileset load events
    auto app = carb::getCachedInterface<omni::kit::IApp>();
    auto bus = app->getMessageBusEventStream();
    auto tilesetLoadedEvent = carb::events::typeFromString("cesium.omniverse.TILESET_LOADED");
    tilesetLoadListener = std::make_unique<TilesetLoadListener>();
    endToEndTilesetSubscriptionPtr = bus->createSubscriptionToPushByType(tilesetLoadedEvent, tilesetLoadListener.get());
    // Load a local test tileset
    endToEndTilesetPath = UsdUtil::makeUniquePath(pContext->getUsdStage(), rootPath, "endToEndTileset");
    auto endToEndTileset = UsdUtil::defineCesiumTileset(pContext->getUsdStage(), endToEndTilesetPath);
    std::string tilesetFilePath = "file://" TEST_WORKING_DIRECTORY "/tests/testAssets/tilesets/Tileset/tileset.json";
    endToEndTileset.GetSourceTypeAttr().Set(pxr::TfToken("url"));
    endToEndTileset.GetUrlAttr().Set(tilesetFilePath);
}
// Fixture teardown: releases the event subscription, removes the test tileset
// prim from the stage, and destroys the listener.
void cleanUpTilesetTests(const pxr::UsdStageRefPtr& stage) {
    endToEndTilesetSubscriptionPtr->unsubscribe();
    stage->RemovePrim(endToEndTilesetPath);
    tilesetLoadListener.reset();
}
// Relies on setUpTilesetTests() having run (and frames having elapsed) before
// the suite executes, so the load event has had time to arrive.
TEST_SUITE("Tileset tests") {
    TEST_CASE("End to end test") {
        // set by the TilesetLoadListener when any tileset successfully loads
        CHECK(endToEndTilesetLoaded);
    }
}
| 2,404 | C++ | 33.855072 | 120 | 0.745424 |
CesiumGS/cesium-omniverse/tests/src/testUtils.cpp | #include "testUtils.h"
#include <unordered_map>
#include <variant>
#include <yaml-cpp/node/detail/iterator_fwd.h>
#include <yaml-cpp/node/node.h>
#include <yaml-cpp/node/parse.h>
#include <yaml-cpp/node/type.h>
#include <yaml-cpp/yaml.h>
// Append n pseudo-random ints in [min, max) to lst.
//
// Uses rand(), so results depend on the global seed (srand). An empty or
// inverted range (max <= min) degenerates to repeating `min`; previously
// `rand() % 0` was undefined behavior.
void fillWithRandomInts(std::list<int>& lst, int min, int max, int n) {
    const int span = max - min;
    for (int i = 0; i < n; ++i) {
        // The odd order here is to avoid issues with rollover
        const int x = span > 0 ? (rand() % span) + min : min;
        lst.push_back(x);
    }
}
// Build the effective config for a scenario: start from the "default"
// scenario's keys, then overlay the scenario-specific overrides.
ConfigMap getScenarioConfig(const std::string& scenario, YAML::Node configRoot) {
    ConfigMap merged;
    for (const auto& entry : configRoot["scenarios"]["default"]) {
        merged[entry.first.as<std::string>()] = entry.second;
    }
    for (const auto& entry : configRoot["scenarios"][scenario]) {
        merged[entry.first.as<std::string>()] = entry.second;
    }
    return merged;
}
| 1,061 | C++ | 26.947368 | 92 | 0.63148 |
CesiumGS/cesium-omniverse/tests/src/CesiumOmniverseCppTests.cpp | #define CARB_EXPORTS
#define DOCTEST_CONFIG_IMPLEMENT
#define DOCTEST_CONFIG_IMPLEMENTATION_IN_DLL
#define DOCTEST_CONFIG_SUPER_FAST_ASSERTS
#include "CesiumOmniverseCppTests.h"
#include "UsdUtilTests.h"
#include "testUtils.h"
#include "tilesetTests.h"
#include "cesium/omniverse/Context.h"
#include "cesium/omniverse/Logger.h"
#include <carb/PluginUtils.h>
#include <cesium/omniverse/UsdUtil.h>
#include <doctest/doctest.h>
#include <omni/fabric/IFabric.h>
#include <omni/kit/IApp.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/stage.h>
#include <iostream>
namespace cesium::omniverse::tests {
// Carbonite plugin that drives the C++ doctest suites from inside a running
// Kit process, where a live USD stage and Fabric are available.
class CesiumOmniverseCppTestsPlugin final : public ICesiumOmniverseCppTestsInterface {
public:
    // Creates the Cesium runtime context; called once on extension startup.
    void onStartup(const char* cesiumExtensionLocation) noexcept override {
        _pContext = std::make_unique<Context>(cesiumExtensionLocation);
    }
    // Destroys the context; called on extension shutdown.
    void onShutdown() noexcept override {
        _pContext = nullptr;
    }
    void setUpTests(long int stage_id) noexcept override {
        // This runs after the stage has been created, but at least one frame
        // before runAllTests. This is to allow time for USD notifications to
        // propagate, as prims cannot be created and used on the same frame.
        _pContext->getLogger()->info("Setting up Cesium Omniverse Tests with stage id: {}", stage_id);
        _pContext->onUsdStageChanged(stage_id);
        auto rootPath = cesium::omniverse::UsdUtil::getRootPath(_pContext->getUsdStage());
        setUpUsdUtilTests(_pContext.get(), rootPath);
        setUpTilesetTests(_pContext.get(), rootPath);
    }
    void runAllTests() noexcept override {
        _pContext->getLogger()->info("Running Cesium Omniverse Tests");
        // construct a doctest context
        doctest::Context context;
        // Some tests contain relative paths rooted in the top level project dir
        // so we set this as the working directory
        std::filesystem::path oldWorkingDir = std::filesystem::current_path();
        std::filesystem::current_path(TEST_WORKING_DIRECTORY);
        // run test suites
        context.run();
        // restore the previous working directory
        std::filesystem::current_path(oldWorkingDir);
        _pContext->getLogger()->info("Cesium Omniverse tests complete");
        _pContext->getLogger()->info("Cleaning up after tests");
        cleanUpAfterTests();
        _pContext->getLogger()->info("Cesium Omniverse test prims removed");
    }
    // Removes every prim the test fixtures created, leaving the stage clean.
    void cleanUpAfterTests() noexcept {
        // delete any test related prims here
        auto pUsdStage = _pContext->getUsdStage();
        cleanUpUsdUtilTests(pUsdStage);
        cleanUpTilesetTests(pUsdStage);
    }
private:
    std::unique_ptr<Context> _pContext;
};
} // namespace cesium::omniverse::tests
// Carbonite plugin descriptor: identifies this test plugin to the Kit runtime.
const struct carb::PluginImplDesc pluginImplDesc = {
    "cesium.omniverse.cpp.tests.plugin",
    "Cesium Omniverse Tests Plugin.",
    "Cesium",
    carb::PluginHotReload::eDisabled,
    "dev"};
#ifdef CESIUM_OMNI_CLANG
#pragma clang diagnostic push
// CARB_PLUGIN_IMPL expands variadic macros that trip this clang warning.
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#endif
CARB_PLUGIN_IMPL(pluginImplDesc, cesium::omniverse::tests::CesiumOmniverseCppTestsPlugin)
CARB_PLUGIN_IMPL_DEPS(omni::fabric::IFabric, omni::kit::IApp, carb::settings::ISettings)
#ifdef CESIUM_OMNI_CLANG
#pragma clang diagnostic pop
#endif
// Required by Carbonite's plugin loader; nothing to populate on this interface.
void fillInterface([[maybe_unused]] cesium::omniverse::tests::CesiumOmniverseCppTestsPlugin& iface) {}
CesiumGS/cesium-omniverse/tests/src/UsdUtilTests.cpp | #include "UsdUtilTests.h"
#include "testUtils.h"
#include "cesium/omniverse/Context.h"
#include "cesium/omniverse/UsdUtil.h"
#include <CesiumUsdSchemas/data.h>
#include <CesiumUsdSchemas/georeference.h>
#include <CesiumUsdSchemas/globeAnchorAPI.h>
#include <CesiumUsdSchemas/ionRasterOverlay.h>
#include <CesiumUsdSchemas/ionServer.h>
#include <CesiumUsdSchemas/session.h>
#include <CesiumUsdSchemas/tileset.h>
#include <doctest/doctest.h>
#include <glm/ext/matrix_double4x4.hpp>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
#include <pxr/usd/usdGeom/xformable.h>
// define prim paths globally to cut down on repeated definitions
// name the paths after the function to be tested so they can easily be paired up later
pxr::SdfPath defineCesiumDataPath;
pxr::SdfPath defineCesiumSessionPath;
pxr::SdfPath defineCesiumGeoreferencePath;
pxr::SdfPath defineCesiumTilesetPath;
pxr::SdfPath defineCesiumIonRasterOverlayPath;
pxr::SdfPath defineGlobeAnchorPath;
// Prim created via getOrCreateCesiumSession; kept so cleanUpUsdUtilTests can remove it.
pxr::CesiumSession getOrCreateCesiumSessionPrim;
using namespace cesium::omniverse;
using namespace cesium::omniverse::UsdUtil;
// Non-owning pointer to the context passed to setUpUsdUtilTests; read by the test cases below.
const Context* pContext;
void setUpUsdUtilTests(cesium::omniverse::Context* context, const pxr::SdfPath& rootPath) {
    // Prims are named after the function under test to ensure uniqueness and clarity.
    const auto childPath = [&rootPath](const char* name) { return rootPath.AppendChild(pxr::TfToken(name)); };
    defineCesiumDataPath = childPath("defineCesiumData");
    defineCesiumSessionPath = childPath("defineCesiumSession");
    defineCesiumGeoreferencePath = childPath("defineCesiumGeoreference");
    defineCesiumIonRasterOverlayPath = childPath("defineCesiumIonRasterOverlay");
    defineCesiumTilesetPath = childPath("defineCesiumTileset");
    defineGlobeAnchorPath = childPath("defineGlobeAnchor");
    // Create one prim of each Cesium schema type for the test cases to inspect.
    const auto stage = context->getUsdStage();
    defineCesiumData(stage, defineCesiumDataPath);
    defineCesiumSession(stage, defineCesiumSessionPath);
    defineCesiumGeoreference(stage, defineCesiumGeoreferencePath);
    defineCesiumTileset(stage, defineCesiumTilesetPath);
    defineCesiumIonRasterOverlay(stage, defineCesiumIonRasterOverlayPath);
    // defineGlobeAnchor(globeAnchorPath);
    getOrCreateCesiumSessionPrim = getOrCreateCesiumSession(stage);
    pContext = context;
}
void cleanUpUsdUtilTests(const pxr::UsdStageRefPtr& stage) {
    // Remove the prims created in setUpUsdUtilTests, in the same order they were made.
    const pxr::SdfPath pathsToRemove[] = {
        defineCesiumDataPath,
        defineCesiumSessionPath,
        defineCesiumGeoreferencePath,
        defineCesiumIonRasterOverlayPath,
        defineCesiumTilesetPath,
        defineGlobeAnchorPath,
        getOrCreateCesiumSessionPrim.GetPath(),
    };
    for (const auto& path : pathsToRemove) {
        stage->RemovePrim(path);
    }
}
TEST_SUITE("UsdUtil tests") {
    TEST_CASE("Check expected initial state") {
        auto cesiumObjPath = pxr::SdfPath("/Cesium");
        CHECK(primExists(pContext->getUsdStage(), cesiumObjPath));
        // TODO can we check something invisible here too?
        CHECK(isPrimVisible(pContext->getUsdStage(), cesiumObjPath));
    }
    TEST_CASE("Check glm/usd conversion functions") {
        glm::dmat4 matrix(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        // Round-trip conversion of usd/glm matrix should be lossless
        CHECK(matrix == usdToGlmMatrix(glmToUsdMatrix(matrix)));
    }
    TEST_CASE("Tests that require prim creation") {
        auto primPath = makeUniquePath(pContext->getUsdStage(), getRootPath(pContext->getUsdStage()), "CesiumTestPrim");
        pContext->getUsdStage()->DefinePrim(primPath);
        // Intentionally try the same prim name
        auto cubePath = makeUniquePath(pContext->getUsdStage(), getRootPath(pContext->getUsdStage()), "CesiumTestPrim");
        // Tests makeUniquePath actually returns unique paths
        CHECK(primPath.GetPrimPath() != cubePath.GetPrimPath());
        auto cube = pxr::UsdGeomCube::Define(pContext->getUsdStage(), cubePath);
        auto xformApiCube = pxr::UsdGeomXformCommonAPI(cube);
        xformApiCube.SetRotate({30, 60, 90});
        xformApiCube.SetScale({5, 12, 13});
        xformApiCube.SetTranslate({3, 4, 5});
        auto xformableCube = pxr::UsdGeomXformable(cube);
        pxr::GfMatrix4d cubeXform;
        bool xformStackResetNeeded [[maybe_unused]];
        xformableCube.GetLocalTransformation(&cubeXform, &xformStackResetNeeded);
        CHECK(usdToGlmMatrix(cubeXform) == computePrimLocalToWorldTransform(pContext->getUsdStage(), cubePath));
        pContext->getUsdStage()->RemovePrim(primPath);
        pContext->getUsdStage()->RemovePrim(cubePath);
    }
    TEST_CASE("Test UTF-8 path names") {
        for (int i = 0; i < NUM_TEST_REPETITIONS; ++i) {
            std::string randomUTF8String = "safe_name_test";
            randomUTF8String.reserve(64);
            for (long unsigned int ii = 0; ii < randomUTF8String.capacity() - randomUTF8String.size(); ++ii) {
                // NOTE: the cast truncates values above CHAR_MAX, so this produces
                // arbitrary single bytes rather than valid multi-byte UTF-8 sequences.
                char randChar = (char)(rand() % 0xE007F);
                // Fix: the previous code called append(&randChar), which treats the
                // address of a single non-null-terminated char as a C string and reads
                // past it (undefined behavior). push_back appends exactly one char.
                randomUTF8String.push_back(randChar);
            }
            auto safeUniquePath =
                makeUniquePath(pContext->getUsdStage(), getRootPath(pContext->getUsdStage()), randomUTF8String);
            pContext->getUsdStage()->DefinePrim(safeUniquePath);
            CHECK(primExists(pContext->getUsdStage(), safeUniquePath));
            pContext->getUsdStage()->RemovePrim(safeUniquePath);
            CHECK_FALSE(primExists(pContext->getUsdStage(), safeUniquePath));
        }
    }
    TEST_CASE("Cesium helper functions") {
        // Each prim was created in setUpUsdUtilTests with the matching define* helper.
        CHECK(isCesiumData(pContext->getUsdStage(), defineCesiumDataPath));
        CHECK(isCesiumSession(pContext->getUsdStage(), defineCesiumSessionPath));
        CHECK(isCesiumGeoreference(pContext->getUsdStage(), defineCesiumGeoreferencePath));
        CHECK(isCesiumTileset(pContext->getUsdStage(), defineCesiumTilesetPath));
        CHECK(isCesiumIonRasterOverlay(pContext->getUsdStage(), defineCesiumIonRasterOverlayPath));
        // CHECK(hasCesiumGlobeAnchor(pContext->getUsdStage(), globeAnchorPath));
        CHECK(isCesiumSession(pContext->getUsdStage(), getOrCreateCesiumSessionPrim.GetPath()));
    }
    TEST_CASE("Smoke tests") {
        // functions for which we do not yet have better tests,
        // but we can at least verify they don't throw
        CHECK_NOTHROW(getDynamicTextureProviderAssetPathToken("foo"));
        CHECK_NOTHROW(getUsdUpAxis(pContext->getUsdStage()));
        CHECK(getUsdMetersPerUnit(pContext->getUsdStage()) > 0);
    }
}
| 6,848 | C++ | 41.80625 | 120 | 0.723423 |
CesiumGS/cesium-omniverse/tests/configs/exampleConfig.yaml | ---
# One way to use the config file is to generate multiple scenarios with
# variations on some known parameters. If most of the scenarios have a
# parameter at one particular value, it can make sense to establish that as
# the default, then we only need to list the changes from that default.
# See the gltf test config for a real use-case
scenarios:
default:
# currently supported data types for the testUtils methods:
# float
pi : 3.14159
# int
onlyEvenPrime : 2
# string
transmogrifierOutput : "foo"
# sequence
fibonacciSeq :
- 1
- 1
- 2
- 3
- 5
# an example override for a given item
scenario2:
transmogrifierOutput : "bar"
| 740 | YAML | 23.699999 | 75 | 0.640541 |
CesiumGS/cesium-omniverse/tests/configs/gltfConfig.yaml | ---
#
scenarios:
default:
hasNormals : True
hasTexcoords : True
hasRasterOverlayTexcoords : False
hasVertexColors : False
doubleSided : False
# Material Attributes
hasMaterial : True
alphaMode : 0
alphaCutoff : 0.5
baseAlpha : 1.0
metallicFactor : 1.0
roughnessFactor : 1.0
baseColorTextureWrapS : 10497 # opengl enum for "repeat"
baseColorTextureWrapT : 10497
emissiveFactor:
- 0
- 0
- 0
baseColorFactor:
- 1
- 1
- 1
  # Note: all files should be .glbs. Anything that uses or queries
  # accessors (included in some tests) requires a call to
  # CesiumGltfReader::resolveExternalData, which proved to be complicated to integrate.
Duck.glb:
hasTexcoords : True
metallicFactor : 0
Mesh_Primitives_00.glb:
hasNormals : False
hasTexcoords : False
baseColorFactor:
- 0
- 1
- 0
Mesh_PrimitivesUV_00.glb:
hasNormals : False
hasTexcoords : False
Mesh_PrimitivesUV_06.glb:
hasVertexColors : True
Mesh_PrimitivesUV_08.glb:
hasVertexColors : True
Material_07.glb:
metallicFactor : 0.0
emissiveFactor :
- 1
- 1
- 1
baseColorFactor :
- 0.2
- 0.2
- 0.2
Material_AlphaBlend_05.glb:
hasNormals : False
hasTexcoords : True
alphaMode : 2
baseAlpha : 0.7
Material_AlphaBlend_06.glb:
hasNormals : False
hasVertexColors : True
hasTexcoords : True
alphaMode : 2
baseAlpha : 0.7
Material_AlphaMask_04.glb:
hasNormals : False
hasTexcoords : True
alphaMode : 1
alphaCutoff : 0.0
Material_AlphaMask_06.glb:
hasNormals : False
hasTexcoords : True
alphaMode : 1
alphaCutoff : 0.6
baseAlpha : 0.7
Mesh_PrimitiveVertexColor_00.glb:
hasMaterial : False
hasTexcoords : False
hasVertexColors : True
Mesh_PrimitiveVertexColor_01.glb:
hasMaterial : False
hasTexcoords : False
hasVertexColors : True
Mesh_PrimitiveVertexColor_02.glb:
hasMaterial : False
hasTexcoords : False
hasVertexColors : True
Mesh_PrimitiveVertexColor_03.glb:
hasMaterial : False
hasTexcoords : False
hasVertexColors : True
Mesh_PrimitiveVertexColor_04.glb:
hasMaterial : False
hasTexcoords : False
hasVertexColors : True
Mesh_PrimitiveVertexColor_05.glb:
hasMaterial : False
hasTexcoords : False
hasVertexColors : True
| 2,467 | YAML | 19.566667 | 87 | 0.655857 |
CesiumGS/cesium-omniverse/tests/bindings/PythonBindings.cpp | #include "CesiumOmniverseCppTests.h"
#include <carb/BindingsPythonUtils.h>
#ifdef CESIUM_OMNI_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#endif
CARB_BINDINGS("cesium.omniverse.cpp.tests.python")
#ifdef CESIUM_OMNI_CLANG
#pragma clang diagnostic pop
#endif
DISABLE_PYBIND11_DYNAMIC_CAST(cesium::omniverse::tests::ICesiumOmniverseCppTestsInterface)
PYBIND11_MODULE(CesiumOmniverseCppTestsPythonBindings, m) {
using namespace cesium::omniverse::tests;
m.doc() = "pybind11 cesium.omniverse.cpp.tests bindings";
// clang-format off
carb::defineInterfaceClass<ICesiumOmniverseCppTestsInterface>(
m, "ICesiumOmniverseCppTestsInterface", "acquire_cesium_omniverse_tests_interface", "release_cesium_omniverse_tests_interface")
.def("set_up_tests", &ICesiumOmniverseCppTestsInterface::setUpTests)
.def("run_all_tests", &ICesiumOmniverseCppTestsInterface::runAllTests)
.def("on_startup", &ICesiumOmniverseCppTestsInterface::onStartup)
.def("on_shutdown", &ICesiumOmniverseCppTestsInterface::onShutdown);
// clang-format on
}
| 1,147 | C++ | 33.787878 | 135 | 0.768963 |
CesiumGS/cesium-omniverse/tests/include/tilesetTests.h | #pragma once
#include <pxr/usd/usd/common.h>
namespace cesium::omniverse {
class Context;
}
void setUpTilesetTests(cesium::omniverse::Context* pContext, const pxr::SdfPath& rootPath);
void cleanUpTilesetTests(const pxr::UsdStageRefPtr& stage);
| 246 | C | 23.699998 | 91 | 0.784553 |
CesiumGS/cesium-omniverse/tests/include/CesiumOmniverseCppTests.h | #pragma once
#include <carb/Interface.h>
namespace cesium::omniverse::tests {
class ICesiumOmniverseCppTestsInterface {
public:
CARB_PLUGIN_INTERFACE("cesium::omniverse::tests::ICesiumOmniverseCppTestsInterface", 0, 0);
/**
* @brief Call this on extension startup.
*
* @param cesiumExtensionLocation Path to the Cesium Omniverse extension location.
*/
virtual void onStartup(const char* cesiumExtensionLocation) noexcept = 0;
/**
* @brief Call this on extension shutdown.
*/
virtual void onShutdown() noexcept = 0;
/**
* @brief To be run at least one fram prior to `runAllTests` in order to
* allow time for USD notifications to propogate.
*/
virtual void setUpTests(long int stage_id) noexcept = 0;
/**
* @brief Collects and runs all the doctest tests defined in adjacent .cpp files
*/
virtual void runAllTests() noexcept = 0;
};
} // namespace cesium::omniverse::tests
| 971 | C | 26.771428 | 95 | 0.679712 |
CesiumGS/cesium-omniverse/tests/include/UsdUtilTests.h | #pragma once
#include <pxr/usd/usd/common.h>
namespace cesium::omniverse {
class Context;
}
void setUpUsdUtilTests(cesium::omniverse::Context* pContext, const pxr::SdfPath& rootPath);
void cleanUpUsdUtilTests(const pxr::UsdStageRefPtr& stage);
| 246 | C | 23.699998 | 91 | 0.784553 |
CesiumGS/cesium-omniverse/docs/README.md | # Cesium for Omniverse Documentation
## Usage
- [Developer Setup](./developer-setup/README.md)
- [Release Guide](./release-guide/README.md)
## General Omniverse Tips
- [Omniverse Connector & Live Sync for SketchUp](./connectors/README.md)
- [Programmatically Changing Settings](./kit/README.md)
- [Building USD On Ubuntu 22.04](./developer-setup/building_usd_on_ubuntu2204.md)
| 381 | Markdown | 28.384613 | 81 | 0.745407 |
CesiumGS/cesium-omniverse/docs/onboarding/README.md | ## What is Omniverse?
Omniverse is a tool that provides an interface for various other tools to interact with a shared 3d environment. The core of this is a USD stage and a Fabric stage. The tools that interact with these stages do so via extensions.
To better understand extensions and how they're defined, check out the [official Omniverse extension template](https://github.com/NVIDIA-Omniverse/kit-extension-template) for a "hello world" extension.
There is also a similar [C++ extension template](https://github.com/NVIDIA-Omniverse/kit-extension-template-cpp).
### Our Extensions/Apps
- Cesium for Omniverse ("The main extension")
- Responsible for streaming geospatial data onto the stages, and providing the user interface.
- Cesium Usd plugins
  - Required by the main extension to facilitate interactions with the USD stage.
- Cesium Powertools
- Helpful additions for developers, such as one-click ways to open debug interfaces and print the fabric stage.
- Cesium Cpp Tests
- Tests of the C++ code underlying the main extension. For more info see [the testing guide](../testing-guide/README.md)
- The Performance App
- Used to get general timing of an interactive session. See [the testing guide](../testing-guide/README.md) for how to run.
## Project File Structure
Some self-explanatory directories have been omitted.
- `apps` - Tools that use the extensions, such as the performance testing app, but are not themselves extensions
- `docker` - Docker configuration for AlmaLinux 8 CI builds
- `exts` - This is where extension code is kept. The file structure follows the pattern:
```
exts
└── dot.separated.name
├── bin
│ ├── libdot.separated.name.plugin.so
└── dot
└── separated
└── name
└── codeNeededByExtension
└── __init__.py
└── extension.py
```
- `genStubs.*`- auto-generates stub files for python bindings, which are not functionally required but greatly improve intellisense.
- `src`/`include` - There are several `src`/`include` subdirs throughout the project, but this top level one is only for code used in the python bindings for the main extension.
- `regenerate_schema.*` - changes to our usd schema require using this script.
- `scripts` - useful scripts for development that do not contribute to any extension function.
- `tests` - c++ related test code used by the Tests Extension. For python related test code, check `exts/cesium.omniverse/cesium/omniverse/tests`. For more details, see the [testing guide](../testing-guide/README.md)
| 2,623 | Markdown | 61.476189 | 228 | 0.722074 |
CesiumGS/cesium-omniverse/docs/release-guide/README.md | # Releasing a new version of Cesium for Omniverse
This is the process we follow when releasing a new version of Cesium for Omniverse on GitHub.
1. [Release a new version of Cesium for Omniverse Samples](#releasing-a-new-version-of-cesium-for-omniverse-samples).
2. Make sure the latest commit in `main` is passing CI.
3. Download the latest build from S3. In the AWS management console (InternalServices AWS account), go to the bucket [`cesium-builds/cesium-omniverse/main`](https://s3.console.aws.amazon.com/s3/buckets/cesium-builds?region=us-east-1&prefix=cesium-omniverse/main/&showversions=false), find the appropriate date and commit hash to download the AlmaLinux and Windows zip files (e.g. `CesiumGS-cesium-omniverse-linux-x86_64-xxxxxxx.zip` and `CesiumGS-cesium-omniverse-windows-x86_64-xxxxxxx.zip`)
4. Verify that the Linux package loads in USD Composer (see instructions below).
5. Verify that the Windows package loads in USD Composer (see instructions below).
6. Update the project `VERSION` in [CMakeLists.txt](../../CMakeLists.txt).
7. Update the extension `version` in [cesium.omniverse/config/extension.toml](../../exts/cesium.omniverse/config/extension.toml). This should be the same version as above.
8. If any changes have been made to the Cesium USD schemas since last release:
* Update the extension `version` in [cesium.usd.plugins/config/extension.toml](../../exts/cesium.usd.plugins/config/extension.toml)
* Update the `cesium.usd.plugins` dependency version in [cesium.omniverse/config/extension.toml](../../exts/cesium.omniverse/config/extension.toml)
9. Update [`CHANGES.md`](../../CHANGES.md).
10. Update `ION_ACCESS_TOKEN` in [`extension.py`](../../apps/exts/cesium.performance.app/cesium/performance/app/extension.py) within `cesium.performance.app` using the newly generated keys.
11. Create a branch, e.g. `git checkout -b release-0.0.0`.
12. Commit the changes, e.g. `git commit -am "0.0.0 release"`.
13. Push the commit, e.g. `git push origin release-0.0.0`.
14. Open a PR and merge the branch with "Rebase and merge".
15. Tag the release, e.g. `git tag -a v0.0.0 -m "0.0.0 release"`.
16. Push the tag, e.g. `git push origin v0.0.0`.
17. Wait for CI to pass.
18. Download the latest build from S3. In the AWS management console (InternalServices AWS account), go to the bucket [`cesium-builds/cesium-omniverse`](https://s3.console.aws.amazon.com/s3/buckets/cesium-builds?prefix=cesium-omniverse/®ion=us-east-1), find the folder with the new tag and download the AlmaLinux and Windows zip files (e.g. `CesiumGS-cesium-omniverse-linux-x86_64-v0.0.0.zip` and `CesiumGS-cesium-omniverse-windows-x86_64-v0.0.0.zip` )
19. Create a new release on GitHub: https://github.com/CesiumGS/cesium-omniverse/releases/new.
    * Choose the new tag.
* Copy the changelog into the description. Follow the format used in previous releases.
* Upload the Linux and Windows release zip files.
# Releasing a new version of Cesium for Omniverse Samples
1. Create a new access token using the CesiumJS ion account.
* The name of the token should match "Cesium for Omniverse Samples vX.X.X - Delete on April 1st, 2023" where the version is the same as the Cesium for Omniverse release and the expiry date is two months later than present.
* The scope of the token should be "assets:read" for all assets.
2. Replace the `cesium:projectDefaultIonAccessToken` property in each `.usda` file with the new access token.
3. Verify that all the USD files load in Cesium for Omniverse.
4. Update `CHANGES.md`.
5. Commit the changes, e.g. `git commit -am "0.0.0 release"`.
6. Push the commit, e.g. `git push origin main`.
7. Tag the release, e.g. `git tag -a v0.0.0 -m "0.0.0 release"`.
8. Push the tag, e.g. `git push origin v0.0.0`.
9. Download the repo as a zip file.
10. Extract the zip file.
11. Rename the extracted folder, e.g. rename `cesium-omniverse-samples-main` to `CesiumOmniverseSamples-v0.0.0`.
12. Create a zip file of the folder
13. Create a new release on GitHub: https://github.com/CesiumGS/cesium-omniverse-samples/releases/new.
* Choose the new tag.
* Copy the changelog into the description. Follow the format used in previous releases.
* Upload the zip file.
# Verify Package
After the package is built, verify that the extension loads in USD Composer:
* Open USD Composer
* Open the extensions window and remove Cesium for Omniverse from the list of search paths (if it exists)
* Close USD Composer
* Unzip the package to `$USERHOME$/Documents/Kit/Shared/exts`
* Open USD Composer
* Open the extensions window and enable autoload for Cesium for Omniverse
* Restart USD Composer
* Verify that there aren't any console errors
* Verify that you can load Cesium World Terrain and OSM buildings
* Delete the extensions from `$USERHOME$/Documents/Kit/Shared/exts`
| 4,829 | Markdown | 72.181817 | 492 | 0.7544 |
CesiumGS/cesium-omniverse/docs/release-guide/push-docker-image.md | # Pushing the Docker Image for AlmaLinux 8 builds.
We use a docker image for our AlmaLinux 8 builds that contains all of our build dependencies, so we don't have to build the image from scratch on each build. This document outlines how to build and push this to Docker Hub.
## Installing Docker
Install [Docker Desktop](https://docs.docker.com/desktop/install/ubuntu/). You will need a license for this and access to our account.
On Linux, docker is run as root. To avoid the requirement for `sudo`, you should add your user to the `docker` group:
```shell
sudo usermod -aG docker $USER
```
To use the new group membership without logging out of your session
completely, you can "relogin" in the same shell by typing:
```shell
su - $USER
```
Note: this creates a new login shell and may behave differently from
your expectations in a windowed environment e.g., GNOME. In
particular, `ssh` logins and `git` may not work anymore.
## Building the container
Confirm that you have push access to the [container repo](https://hub.docker.com/r/cesiumgs/omniverse-almalinux8-build).
### Log in
Log into docker using:
```shell
docker login
```
### Build the docker image
After making your changes to the docker file, execute:
```shell
docker build --tag cesiumgs/omniverse-almalinux8-build:$TAGNAME -f docker/AlmaLinux8.Dockerfile . --no-cache
```
You should replace `TAGNAME` with the current date in `YYYY-MM-DD` format. So if it's the 29th of August, 2023, you would use `2023-08-29`.
### Push the image to Docker Hub
The build will take some time. Once it is finished, execute the following to push the image to Docker Hub:
```shell
docker push cesiumgs/omniverse-almalinux8-build:$TAGNAME
```
Again, you should replace `$TAGNAME` with the current date in `YYYY-MM-DD` format. So if it's the 29th of August, 2023, you would use `2023-08-29`.
### Update CI.Dockerfile
The `docker/CI.Dockerfile` file is used as part of the AlmaLinux 8 build step in our GitHub actions. You will need to update the version of the Docker image used to the tagged version you just uploaded.
| 2,087 | Markdown | 33.799999 | 222 | 0.752276 |
CesiumGS/cesium-omniverse/docs/testing-guide/README.md | # Testing Guide
## Performance Test App
Provides some general metrics for how long it takes to load tiles. Can be run with:
```bash
extern/nvidia/_build/target-deps/kit-sdk/kit ./apps/cesium.performance.kit
```
There is intentionally no VS Code launch configuration, out of concern that debug-related settings could slow the app down.
## Python Tests
Python tests are run through `pytest` (see full documentation [here](https://docs.pytest.org/en/latest/)). To run these tests with the proper sourcing and environment, simpy run:
```bash
scripts/run_python_unit_tests.(bat|sh)
```
You can also run these tests via the app. Open the extensions window while running omniverse. Find and select the Cesium for Omniverse Extension, then navigate to its Tests tab. The "Run Extension Tests" button will run the python tests (not the C++ tests).
## C++ Tests (The Tests Extension)
C++ tests are run through `doctest`, which is set up and run via the Tests Extension.
Normally `doctest` can be run via the command line, but since much of the code we test
can only run properly inside omniverse, we run the tests there too.
The easiest way to run the tests extension is via the launch configuration in vs code. Simply go to the `run and debug` dropdown and launch the `Tests Extension`. The testing output is provided in the terminal used to launch everything. Failed tests will be caught by the debugger, though you may need to go one level up in the execution stack to see the `CHECK` being called.
To run the extension via the command line, simply pass the tests extension's kit config file to kit with
```bash
extern/nvidia/_build/target-deps/kit-sdk/kit ./apps/cesium.omniverse.cpp.tests.runner.kit
```
[doctest documentation](https://bit.ly/doctest-docs) can be found here.
## How do I add a new test?
### Python
`pytest` will auto-discover functions matching the pattern `test_.*` (and other patterns).
If you want your tests to be included in the tests for the main extension, import it into `exts/cesium.omniverse/cesium/omniverse/tests/__init__.py`.
See [extension_test.py](../../exts/cesium.omniverse/cesium/omniverse/tests/extension_test.py) for an example
### C++
`TEST_SUITE`s and `TEST_CASE`s defined in `tests/src/*.cpp` will be auto-discovered by the `run_all_tests` function in `tests/src/CesiumOmniverseCppTests.cpp`. These macros perform some automagic function definitions, so they are best left outside of other function/class definitions. See `tests/src/ExampleTests.cpp` for examples of basic tests and more advanced use cases, such as using a config file to
define expected outputs or parameterizing tests.
To create a new set of tests for a class that doesn't already have a relevant tests cpp file, say `myCesiumClass.cpp`:
- create `tests/src/myCesiumClassTests.cpp` and `tests/include/myCesiumClassTests.h`
- define any setup and cleanup required for the tests in functions in `myCesiumClassTests.cpp`. This can be anything that has to happen on a different frame than the tests, such as prim creation or removal.
- expose the setup and cleanup functions in `myCesiumClassTests.h`
- call the setup in `setUpTests()` in `tests/src/CesiumOmniverseCppTests.cpp`
- call the cleanup in `cleanUpAfterTests()` in `tests/src/CesiumOmniverseCppTests.cpp`
- define a `TEST_SUITE` in `myCesiumClassTests.cpp`, and place your `TEST_CASE`(s) in it
Any tests defined in the new test suite will be auto-discovered and run when `runAllTests()` (bound to `run_all_tests()`) is called. Classes that do not require setup/cleanup can skip the header and any steps related to setup/cleanup functions.
| 3,606 | Markdown | 71.139999 | 405 | 0.773156 |
CesiumGS/cesium-omniverse/docs/developer-setup/README.md | <!-- omit in toc -->
# Cesium for Omniverse
- [Prerequisites](#prerequisites)
- [Linux](#linux)
- [Windows](#windows)
- [Clone the repository](#clone-the-repository)
- [Build](#build)
- [Linux](#linux-1)
- [Windows](#windows-1)
- [Docker](#docker)
- [Advanced build options](#advanced-build-options)
- [Unit Tests](#unit-tests)
- [Coverage](#coverage)
- [Documentation](#documentation)
- [Installing](#installing)
- [Tracing](#tracing)
- [Sanitizers](#sanitizers)
- [Formatting](#formatting)
- [Linting](#linting)
- [Packaging](#packaging)
- [Build Linux Package (Local)](#build-linux-package-local)
- [Build Windows Package (Local)](#build-windows-package-local)
- [VSCode](#vscode)
- [Workspaces](#workspaces)
- [Tasks](#tasks)
- [Launching/Debugging](#launchingdebugging)
- [Project Structure](#project-structure)
- [Third Party Libraries](#third-party-libraries)
- [Overriding Packman Libraries](#overriding-packman-libraries)
## Prerequisites
See [Linux](#linux) or [Windows](#windows) for step-by-step installation instructions
- Linux (Ubuntu 22.04+ or equivalent) or Windows
- Clang 15+, GCC 9+, or Visual Studio 2022+
- Python 3.10+ - For Conan and scripts
- CMake 3.22+ - Build system generator
- Make - Build system (Linux only)
- Conan - Third party C++ library management
- gcovr - Code coverage (Linux only)
- Doxygen - Documentation
- clang-format - Code formatting
- clang-tidy - Linting and static code analysis (Linux only)
### Linux
- Ensure the correct NVIDIA drivers are installed (not the default open source driver) and that the GPU can be identified
```sh
nvidia-smi
```
- Install dependencies (for Ubuntu 22.04 - other Linux distributions should be similar)
```sh
sudo apt install -y gcc-9 g++-9 clang-15 python3 python3-pip cmake make git doxygen clang-format-15 clang-tidy-15 clangd-15 gcovr
```
- Install Conan with pip because Conan is not in Ubuntu's package manager
```sh
sudo pip3 install conan==1.64.0
```
- Install `cmake-format`
```sh
sudo pip3 install cmake-format
```
- Install `black` and `flake8`
```sh
pip3 install black==23.1.0 flake8==6.0.0
```
- Add symlinks the clang-15 tools so that the correct version is chosen when running `clang-format`, `clang-tidy`, etc
```sh
sudo ln -s /usr/bin/clang-15 /usr/bin/clang
sudo ln -s /usr/bin/clang++-15 /usr/bin/clang++
sudo ln -s /usr/bin/clang-format-15 /usr/bin/clang-format
sudo ln -s /usr/bin/clang-tidy-15 /usr/bin/clang-tidy
sudo ln -s /usr/bin/run-clang-tidy-15 /usr/bin/run-clang-tidy
sudo ln -s /usr/bin/llvm-cov-15 /usr/bin/llvm-cov
sudo ln -s /usr/bin/clangd-15 /usr/bin/clangd
```
- Or, you can use the `update-alternatives` program to create the
links and manage versions. This is an approach you can use in
a script or on the command line:
```sh
clangprogs="/usr/bin/clang*-15 /usr/bin/run-clang-tidy-15 /usr/bin/llvm-cov-15"
for prog in $clangprogs
do
linked=${prog%%-15}
generic=${linked##*/}
update-alternatives --install $linked $generic $prog 15
done
```
- Then refresh the shell so that newly added dependencies are available in the path.
```sh
exec bash
```
### Windows
There are two ways to install prerequisites for Windows, [manually](#install-manually) or [with Chocolatey](#install-with-chocolatey). Chocolately is quicker to set up but may conflict with existing installations. We use Chocolatey for CI.
<!-- omit in toc -->
#### Install manually
- Install Visual Studio 2022 Professional: https://visualstudio.microsoft.com/downloads/
- Select `Desktop Development with C++` and use the default components
- Install Git: https://git-scm.com/downloads
- Use defaults
- Install LLVM 15.0.7: https://llvm.org/builds
- When prompted, select `Add LLVM to the system PATH for all users`
- Install CMake: https://cmake.org/download
- When prompted, select `Add CMake to the system PATH for all users`
- Install Python (version 3.x): https://www.python.org/downloads
- Select `Add Python 3.x to PATH`
- Create a symbolic link called `python3.exe` that points to the actual `python` (version 3.x) executable. This is necessary for some of the scripts to run correctly when `#!/usr/bin/env python3` is at the top of the file. Open Command Prompt as administrator and enter:
```sh
where python
cd <first_path_in_list>
mklink python3.exe python.exe
```
- Install `requests` module for Python
```sh
pip3 install requests
```
- Install `cmake-format`
```sh
pip3 install cmake-format
```
- Install `black` and `flake8`
```sh
pip3 install black==23.1.0 flake8==6.0.0
```
- Install `colorama` to enable color diff support
```sh
pip3 install colorama
```
- Install Conan
```sh
pip3 install conan==1.64.0
```
- Install Doxygen: https://www.doxygen.nl/download.html
- After installation, add the install location to your `PATH`. Open PowerShell as administrator and enter:
```sh
[Environment]::SetEnvironmentVariable("Path", $env:Path + ";C:\Program Files\doxygen\bin", "Machine")
```
- Enable Long Paths. This ensures that all Conan libraries are installed in `~/.conan`. Open PowerShell as administrator and enter:
```sh
New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1 -PropertyType DWORD -Force
```
- Then refresh PowerShell so that newly added dependencies are available in the path.
```sh
refreshenv
```
<!-- omit in toc -->
#### Install with Chocolatey
- Install [Chocolatey](https://docs.chocolatey.org/en-us/choco/setup) and then install dependencies
```sh
choco install -y visualstudio2022professional visualstudio2022-workload-nativedesktop python cmake ninja git doxygen.install vswhere --installargs 'ADD_CMAKE_TO_PATH=System'
```
```sh
choco install -y llvm --version=15.0.7
```
```sh
choco install -y conan --version 1.64.0
```
> **Note:** If you see a warning like `Chocolatey detected you are not running from an elevated command shell`, reopen Command Prompt as administrator
- Create a symbolic link called `python3.exe` that points to the actual `python` (version 3.x) executable. This is necessary for some of the scripts to run correctly when `#!/usr/bin/env python3` is at the top of the file.
```sh
where python
cd <first_path_in_list>
mklink python3.exe python.exe
```
- Install `requests`
```sh
pip3 install requests
```
- Install `cmake-format`
```sh
pip3 install cmake-format
```
- Install `black` and `flake8`
```sh
pip3 install black==23.1.0 flake8==6.0.0
```
- Install `colorama` to enable color diff support
```sh
pip3 install colorama
```
- Enable Long Paths. This ensures that all Conan libraries are installed correctly. Open PowerShell as administrator and enter:
```sh
New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1 -PropertyType DWORD -Force
```
- Then refresh PowerShell so that newly added dependencies are available in the path.
```sh
refreshenv
```
## Clone the repository
```sh
git clone [email protected]:CesiumGS/cesium-omniverse.git --recurse-submodules
```
If you forget the `--recurse-submodules`, nothing will work because the Git submodules will be missing. You should be able to fix it with
```sh
git submodule update --init --recursive
```
## Build
### Linux
```sh
## Release
cmake -B build
cmake --build build --target install --parallel 8
## Debug
cmake -B build-debug -D CMAKE_BUILD_TYPE=Debug
cmake --build build-debug --target install --parallel 8
```
Binaries will be written to `build/bin`. Shared libraries and static libraries will be written to `build/lib`.
### Windows
```sh
## Release
cmake -B build
cmake --build build --config Release --target install --parallel 8
## Debug
cmake -B build
cmake --build build --config Debug --target install --parallel 8
```
Binaries and shared libraries will be written to `build/bin/Release`. Static libraries and python modules will be written to `build/lib/Release`.
CMake will select the most recent version of Visual Studio on your system unless overridden with a generator (e.g. `-G "Visual Studio 17 2022"`).
### Docker
Install [Docker Engine CE For Ubuntu](https://docs.docker.com/engine/install/ubuntu/)
Enter the container:
```sh
docker build --tag cesiumgs/cesium-omniverse:almalinux8 -f docker/AlmaLinux8.Dockerfile .
docker run --rm --interactive --tty --volume $PWD:/var/app cesiumgs/cesium-omniverse:almalinux8
```
Once inside the container, build like usual. Note that linters are turned off. It won't affect the build, it just means there won't be code formatting or linting. It will build fine with GCC.
```sh
cmake -B build -D CESIUM_OMNI_ENABLE_LINTERS=OFF
cmake --build build
```
### Advanced build options
For faster builds, use the `--parallel` option
```sh
cmake -B build
cmake --build build --parallel 8
```
To use a specific C/C++ compiler, set `CMAKE_CXX_COMPILER` and `CMAKE_C_COMPILER`
```sh
cmake -B build -D CMAKE_CXX_COMPILER=clang++-15 -D CMAKE_C_COMPILER=clang-15
cmake --build build
```
Make sure to use a different build folder for each compiler, otherwise you may see an error from Conan like
```
Library [name] not found in package, might be system one.
```
This error can also be avoided by deleting `build/CMakeCache.txt` before switching compilers.
To view verbose output from the compiler, use the `--verbose` option
```sh
cmake -B build
cmake --build build --verbose
```
To change the build configuration, set `CMAKE_BUILD_TYPE` to one of the following values:
- `Debug`: Required for coverage
- `Release`: Used for production builds
- `RelWithDebInfo`: Similar to `Release` but has debug symbols
- `MinSizeRel`: Similar to `Release` but smaller compile size
On Linux
```sh
cmake -B build-relwithdebinfo -D CMAKE_BUILD_TYPE=RelWithDebInfo
cmake --build build-relwithdebinfo
```
On Windows
```sh
cmake -B build
cmake --build build --config RelWithDebInfo
```
Note that Windows (MSVC) is a multi-configuration generator meaning all four build configurations are created during the configure step and the specific configuration is chosen during the build step. If using Visual Studio there will be a dropdown to select the build configuration.
Ninja is also supported as an alternative to the MSVC generator. To build with Ninja locally open `x64 Native Tools Command Prompt for VS 2022` and run:
```
cmake -B build -D CMAKE_C_COMPILER=cl -D CMAKE_CXX_COMPILER=cl -G "Ninja Multi-Config"
cmake --build build --config Release --parallel 8
```
## Unit Tests
Unit tests can be run by starting the Cesium Omniverse Tests extension inside Omniverse.
## Coverage
It's a good idea to generate code coverage frequently to ensure that you're adequately testing new features. To do so run
```sh
cmake -B build-debug -D CMAKE_BUILD_TYPE=Debug
cmake --build build-debug --target generate-coverage
```
Once finished, the coverage report will be located at `build-debug/coverage/index.html`.
Notes:
- Coverage is disabled in `Release` mode because it would be inaccurate and we don't want coverage instrumentation in our release binaries anyway
- Coverage is not supported on Windows
## Documentation
```sh
cmake -B build
cmake --build build --target generate-documentation
```
Once finished, documentation will be located at `build/docs/html/index.html`.
## Installing
To install `CesiumOmniverse` into the Omniverse Kit extension run:
```sh
cmake -B build
cmake --build build --target install
```
This will install the libraries to `exts/cesium.omniverse/bin`.
<!-- omit in toc -->
### Advanced Install Instructions
In some cases it's helpful to produce a self-contained build that can be tested outside of Omniverse. The instructions below are intended for debugging purposes only.
To install `CesiumOmniverse` onto the local system run:
On Linux
```sh
cmake -B build
cmake --build build
cmake --install build --component library --prefix /path/to/install/location
```
On Windows
```sh
cmake -B build
cmake --build build --config Release
cmake --install build --config Release --component library --prefix /path/to/install/location
```
## Tracing
To enable performance tracing set `CESIUM_OMNI_ENABLE_TRACING`:
```sh
cmake -B build -D CESIUM_OMNI_ENABLE_TRACING=ON
cmake --build build
```
A file called `cesium-trace-xxxxxxxxxxx.json` will be saved to the `exts/cesium.omniverse` folder when the program exits. This file can then be inspected in `chrome://tracing/`.
Note that the JSON output may get truncated if the program closes unexpectedly - e.g. when the debugging session is stopped or the program crashes - or if `app.fastShutdown` is `true` (like with Omniverse Create and `cesium.omniverse.dev.kit`). Therefore the best workflow for performance tracing is to run `cesium.omniverse.dev.trace.kit` and close the window normally.
## Sanitizers
When sanitizers are enabled they will check for mistakes that are difficult to catch at compile time, such as reading past the end of an array or dereferencing a null pointer. Sanitizers should not be used for production builds because they inject these checks into the binaries themselves, creating some runtime overhead.
Sanitizers
- ASAN - [Address sanitizer](https://clang.llvm.org/docs/AddressSanitizer.html)
- UBSAN - [Undefined behavior sanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html)
> **Note:** memory leak detection is not supported on Windows currently. See https://github.com/google/sanitizers/issues/1026#issuecomment-850404983
> **Note:** memory leak detection does not work while debugging with gdb. See https://stackoverflow.com/questions/54022889/leaksanitizer-not-working-under-gdb-in-ubuntu-18-04
To verify that sanitization is working, add the following code to any cpp file.
```c++
int arr[4] = {0};
arr[argc + 1000] = 0;
```
After running, it should print something like
```
main.cpp:114:22: runtime error: index 1001 out of bounds for type 'int [4]'
main.cpp:114:24: runtime error: store to address 0x7ffe16f44c44 with insufficient space for an object of type 'int'
0x7ffe16f44c44: note: pointer points here
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
^
```
## Formatting
To format code based on the [`.clang-format`](./.clang-format) configuration file
```sh
cmake -B build
cmake --build build --target clang-format-fix-all
```
The full list of targets is below:
- `clang-format-fix-all` - Formats all code
- `clang-format-fix-staged` - Formats staged code
- `clang-format-check-all` - Checks for formatting problems in all code
- `clang-format-check-staged` - Checks for formatting problems in staged code
Please note that the `clang-format-fix-all` and `clang-format-fix-staged` targets will add fixes in the working area, not in the staging area. We also have a Git hook that is installed on project configuration that will check if the staging area is properly formatted before permitting a commit.
## Linting
`clang-tidy` is run during the build to catch common coding errors. `clang-tidy` is used for linting and static code analysis based on the [`.clang-tidy`](./.clang-tidy) configuration file.
We also generate CMake targets to run these tools manually
Run `clang-tidy`:
```sh
cmake -B build
cmake --build build --target clang-tidy
```
## Packaging
### Build Linux Package (Local)
Linux packages are built in the AlmaLinux 8 Docker container. A Red Hat Enterprise Linux 8 compatible OS is the [minimum OS required by Omniverse](https://docs.omniverse.nvidia.com/app_view/common/technical-requirements.html#suggested-minimums-by-product) and uses glibc 2.18 which is compatible with nearly all modern Linux distributions.
It's recommended to build AlmaLinux 8 packages in a separate clone of cesium-omniverse since the Docker container will overwrite files in the `extern/nvidia/_build` and `exts` folders.
Run the following shell script from the root cesium-omniverse directory:
```sh
./scripts/build_package_almalinux8.sh
```
The resulting `.zip` file will be written to the `build-package` directory (e.g. `CesiumGS-cesium-omniverse-linux-x86_64-v0.0.0.zip`)
### Build Windows Package (Local)
Run the following batch script from the root cesium-omniverse directory:
```sh
./scripts/build_package_windows.bat
```
The resulting `.zip` file will be written to the `build-package` directory (e.g. `CesiumGS-cesium-omniverse-windows-x86_64-v0.0.0.zip`)
## VSCode
We use VSCode as our primary IDE for development. While everything can be done on the command line the `.vscode` project folder has built-in tasks for building, running unit tests, generating documentation, etc.
### Workspaces
Each workspace contains recommended extensions and settings for VSCode development. Make sure to open the workspace for your OS instead of opening the `cesium-omniverse` folder directly.
- [cesium-omniverse-linux.code-workspace](./.vscode/cesium-omniverse-linux.code-workspace)
- [cesium-omniverse-windows.code-workspace](./.vscode/cesium-omniverse-windows.code-workspace)
### Tasks
[`.vscode/tasks.json`](./.vscode/tasks.json) comes with the following tasks:
- Configure - configures the project
- Build (advanced) - configures and builds the project
- Build (tracing) - configures and builds the project with tracing enabled
- Build (kit debug) - configures and builds the project using NVIDIA debug libraries
- Build (verbose) - configures and builds the project with verbose output
- Build (debug) - configures and builds the project in debug mode with the default compiler
- Build (release) - configures and builds the project in release mode with the default compiler
- Build Only (debug) - builds the project in debug mode with the default compiler
- Build Only (release) - builds the project in release mode with the default compiler
- Clean - cleans the build directory
- Coverage - generates a coverage report and opens a web browser showing the results
- Documentation - generates documentation and opens a web browser showing the results
- Format - formats the code with clang-format
- Lint - runs clang-tidy
- Lint Fix - runs clang-tidy and fixes issues
- Dependency Graph - shows the third party library dependency graph
To run a task:
- `Ctrl + Shift + B` and select the task, e.g. `Build`
- Select the build type and compiler (if applicable)
### Launching/Debugging
Windows and Linux versions of `launch.json` are provided in the `.vscode` folder.
* On Windows copy `launch.windows.json` and rename it to `launch.json`.
* On Linux copy `launch.linux.json` and rename it to `launch.json`.
Alternatively, create a symlink so that `launch.json` always stays up-to-date:
```sh
# Windows - Command Prompt As Administrator
cd .vscode
mklink launch.json launch.windows.json
```
```sh
# Linux
cd .vscode
sudo ln -s launch.linux.json launch.json
```
Then select a configuration from the `Run and Debug` panel, such as `Kit App`, and click the green arrow.
> **Note:** Most configurations run a build-only prelaunch task. This assumes the project has already been configured. When debugging for the first time make sure to configure the project first by pressing `Ctrl + Shift + B` and running `Build (debug)`.
> **Note:** For running the `Performance Tracing` configuration, make sure the project has been configured with tracing enabled by pressing `Ctrl + Shift + B` and running `Build (tracing)`.
> **Note:** For running the `Development App (Kit Debug)` configuration make sure the project has been built with NVIDIA debug libraries by pressing `Ctrl + Shift + B` and running `Build (kit debug)`.
> **Note:** For Python debugging, first run `Python Debugging (start)`, then wait for Omniverse to load, then run `Python Debugging (attach)`. Now you can set breakpoints in both the C++ and Python code.
<!-- omit in toc -->
#### Launch/Debug Troubleshooting
- When running in debug within vscode, if you find execution halting at a breakpoint outside the cesium codebase, you may need to uncheck "C++: on throw" under the "Breakpoints" section of the "Run and Debug" panel.
- On Linux, if you are given an error or warning about IOMMU, you may need to turn this off in the BIOS. IOMMU also goes by the name of Intel VT-d and AMD-Vi.
- On Linux, if you see repeated `"[Omniverse app]" is not responding` prompts asking you to either force quit or wait, you may want to extend the global timeout for such events from the default 5s to 30s with the following command (for GNOME):
```sh
gsettings set org.gnome.mutter check-alive-timeout 30000
```
## Project Structure
- `src` - Source code for the CesiumOmniverse library
- `include` - Include directory for the CesiumOmniverse library
- `tests` - Unit tests
- `extern` - Third-party dependencies that aren't on Conan
- `cmake` - CMake helper files
- `scripts` - Build scripts and Git hooks
- `docker` - Docker files
## Third Party Libraries
We use [Conan](https://conan.io/) as our C++ third party package manager for dependencies that are public and not changed often. Third party libraries are always built from source and are cached on the local system for subsequent builds.
To add a new dependency to Conan
- Add it to [AddConanDependencies.cmake](./cmake/AddConanDependencies.cmake)
- Call `find_package` in [CMakeLists.txt](./CMakeLists.txt)
- Add the library to the `LIBRARIES` field in any relevant `setup_lib` or `setup_app` calls
Some dependencies are pulled in as Git submodules instead. When adding a new git submodule add the license to [ThirdParty.extra.json](./ThirdParty.extra.json).
[ThirdParty.json](./ThirdParty.json) is autogenerated and combines [ThirdParty.extra.json](./ThirdParty.extra.json) and Conan dependencies.
### Overriding Packman Libraries
The external dependencies from Nvidia use Nvidia's packman tool to fetch and install. The dependency files are found at `/extern/nvidia/deps` in this repository. You can override these by using a `*.packman.xml.user` file. For example, to override the version of kit you can create a user file called `kit-sdk.packman.xml.user` next to `kit-sdk.packman.xml` in the `deps` directory. You can then use standard packman configurations within this file, such as:
```xml
<project toolsVersion="5.6">
<dependency name="kit_sdk" linkPath="../_build/target-deps/kit-sdk/">
<package name="kit-sdk" version="105.0.1+release.109439.ed961c5c.tc.${platform}.release"/>
</dependency>
<dependency name="kit_sdk_debug" linkPath="../_build/target-deps/kit-sdk-debug/">
<package name="kit-sdk" version="105.0.1+release.109439.ed961c5c.tc.${platform}.debug"/>
</dependency>
</project>
```
The above configuration would override the version of the Kit SDK used to `105.0.1+release.109439.ed961c5c.tc`.
These user files are ignored by the `.gitignore` so it is safe to test out prerelease and private versions of new libraries.
| 22,960 | Markdown | 36.952066 | 458 | 0.740026 |
CesiumGS/cesium-omniverse/docs/developer-setup/building_usd_on_ubuntu2204.md | # Building Pixar's USD 22.11 for Ubuntu 22.04
_Last Updated: 2022/01/12_
Building Pixar's USD 22.11 on Ubuntu 22.04 can be difficult. This guide aims to help those who wish to download and compile USD on their system. For most people, [using the Nvidia binaries should suffice and is the recommended option](https://developer.nvidia.com/usd). If those do not work for you, or you wish to have a self-compiled version, this guide is for you.
## Prerequisites
You need:
- Python 3.7 from the Deadsnakes PPA: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa
- GCC 11
- Cmake
- USD downloaded from the GitHub repository: https://github.com/PixarAnimationStudios/USD
## Python Setup
As of writing, USD targets Python 3.7. On Ubuntu you need to use the [Deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) to get this. You need the following packages:
- python3.7
- python3.7-dev
- libpython3.7
- libpython3.7-dev
Once you have Python 3.7, you need to install `PyOpenGL` and `PySide2`. **You cannot use your normal system `pip` command for this!** The correct command is:
```shell
python3.7 -m pip install PyOpenGL PySide2
```
## Fixing Boost
USD currently targets Boost 1.70 on Linux, which has issues compiling on Ubuntu 22.04. USD supports up to Boost 1.76 on account of issues in MacOS. We can use this to our advantage. Apply the below patchfile to the repository to fix this.
```
diff --git a/build_scripts/build_usd.py b/build_scripts/build_usd.py
index 5d3861d0a..96dd1c0a4 100644
--- a/build_scripts/build_usd.py
+++ b/build_scripts/build_usd.py
@@ -695,7 +695,7 @@ if MacOS():
BOOST_URL = "https://boostorg.jfrog.io/artifactory/main/release/1.76.0/source/boost_1_76_0.tar.gz"
BOOST_VERSION_FILE = "include/boost/version.hpp"
elif Linux():
- BOOST_URL = "https://boostorg.jfrog.io/artifactory/main/release/1.70.0/source/boost_1_70_0.tar.gz"
+ BOOST_URL = "https://boostorg.jfrog.io/artifactory/main/release/1.76.0/source/boost_1_76_0.tar.gz"
BOOST_VERSION_FILE = "include/boost/version.hpp"
elif Windows():
# The default installation of boost on Windows puts headers in a versioned
```
## Building USD
**NOTE: At this time, only a limited number of install options have been tested. YMMV.**
We can use the USD build scripts to build USD as we normally would, but we need to provide some additional options for Python for this to work correctly. If you just need to quickly get this built, use the following command from the USD repository's directory. It builds USD and its tools, including `usdview`, placing them in `~/.local/USD`. If you want to learn more, read on below.
```shell
python3.7m build_scripts/build_usd.py ~/.local/USD \
--tools \
--usd-imaging \
--usdview \
--build-python-info /usr/bin/python3.7m /usr/include/python3.7m /usr/lib/python3.7/config-3.7m-x86_64-linux-gnu/libpython3.7m.so 3.7
```
The important line here is the `--build-python-info` line. This takes, in order, the Python executable, include directory, library, and version. Using the Deadsnakes PPA, these are:
- `PYTHON_EXECUTABLE` : `/usr/bin/python3.7m`
- `PYTHON_INCLUDE_DIR` : `/usr/include/python3.7m`
- `PYTHON_LIBRARY` : `/usr/lib/python3.7/config-3.7m-x86_64-linux-gnu/libpython3.7m.so`
- `PYTHON_VERSION` : `3.7`
Do note that we are using the `pymalloc` versions of Python. The Deadsnakes PPA version of Python 3.7 is compiled using `pymalloc` and `/usr/bin/python3.7` simply symlinks to `/usr/bin/python3.7m`. You could use the symlinks, but there is **NOT** a symlink for `libpython3.7m.so`, so you need to at least provide the direct path to that.
## Afterword
There are a lot of other options for building USD. If you use the command `python3.7m build_scripts/build_usd.py --help` you can get a list of all these commands. Your mileage may vary with compiling these other features.
| 3,871 | Markdown | 49.285714 | 387 | 0.738827 |
CesiumGS/cesium-omniverse/docs/connectors/README.md | # Connectors
Helpful guides for setting up various connectors with Omniverse.
- [SketchUp Connector](./sketchup/README.md)
| 125 | Markdown | 19.999997 | 64 | 0.784 |
CesiumGS/cesium-omniverse/docs/connectors/sketchup/README.md | Introduction
============
This documentation is designed as a supplement guide for the [official
Nvidia documentation on the SketchUp
Connector](https://docs.omniverse.nvidia.com/con_connect/con_connect/sketchup.html)
for Omniverse. While there are some intersections, the primary goal of
this documentation is to get someone new to SketchUp, Omniverse, and
Cesium for Omniverse up and running quickly. It is strongly advised that
the reader take the time to [review the entire official documentation
fully](https://docs.omniverse.nvidia.com/con_connect/con_connect/sketchup.html).
Installing Omniverse Connector for SketchUp
===========================================
Installing the connector can be done through the Exchange Tab in the
Omniverse Launcher. The connector requires a SketchUp Pro license. More
details can be found in the [Installing the
Connector](https://docs.omniverse.nvidia.com/con_connect/con_connect/sketchup.html#installing-the-connector)
section of the official docs.
Instructions
------------
1. Ensure that SketchUp is closed.
2. Navigate to the Exchange Tab
3. Search for "SketchUp"
4. Click on "Trimble SketchUp Omniverse Connector"
5. Click Install
Using Omniverse Connector for SketchUp
======================================
**NOTE:** The [official
documentation](https://docs.omniverse.nvidia.com/con_connect/con_connect/sketchup.html#connecting-to-view-local)
has a section on connecting locally to Omniverse for editing. This
section in the official guide is slightly out of date and does not
contain details about working with Nucleus at the time of writing, but
is worth a read before continuing further.
The Omniverse Toolbar
---------------------
Once installed and in a project, the Omniverse Toolbar can be dragged to
the toolbar area. The diagram below describes all of the functions.
![Omniverse Toolbar](resources/sketchup_toolbar.jpg)
Configuring SketchUp for Omniverse with Nucleus
-----------------------------------------------
Once you have started a new project with the correct scale for your
needs, you will need to ensure that the settings are properly configured
for your Nucleus server. The "Do Not Use Nucleus" checkbox **must be
unchecked** for Live Editing with Nucleus to work.
**WARNING:** Every time you start or open a new project you must go into the settings
dialog and uncheck "Do Not Use Nucleus" at the time of this writing. It
is unclear if this is intended or a bug.
It is also recommended that *Send to locally installed Viewer* is
configured to use either the latest View or Create, and *Create Send To
Omniverse output as:* has "Prop" selected. All other settings can be
set to the user’s liking.
![SketchUp Settings](resources/sketchup_settings.png)
Signing into Omniverse
----------------------
Click the *Sign In to Omniverse* button and enter in the host name for
your Nucleus server. This will open your browser to finish the sign in
process.
Exporting to Nucleus
--------------------
Once configured correctly, you can export to Nucleus by using either the
*Publish Project* or *Publish Prop* button. *Publish Project* produces a
`*.project.usd` file and associated directory and *Publish Prop*
produces a single `*.usd` file containing the relevant information.
**NOTE:** As publishing a prop is more relevant to our needs, this section only
goes into further details about *Publish Prop*. Publishing a project is
more or less identical steps.
When the user presses the *Publish Prop* button in order to publish a
new prop, a dialog appears similar to the one below. The flow for saving
to Nucleus is:
1. Ensure your SketchUp project is saved.
2. Select the path you want to use in Omniverse Nucleus
3. Enter the name of the file after the path in the *File Name* field.
(Extension not required.)
4. Click *Export*
In this screenshot, we are saving a file named "docdemo.usd" to the
Library folder within Nucleus.
![Export Dialog](resources/sketchup_export.png)
**NOTE:** The *Show Publish Options* button is a quick way to open the settings
dialog if you forget to open settings and uncheck *Do Not Use Nucleus*
checkbox when you opened or started your project.
If you are resuming work on a prop and want to properly link to Nucleus
so it receives your latest edits, simply follow the same instructions
but choose the file you want in the picker. This will create a new
session with Omniverse so you can continue syncing your SketchUp file
with Nucleus. Failure to do so when you reopen your file will result in
Nucleus not receiving the changes.
Live Editing
------------
Live editing with the SketchUp Connector does work however it appears to
be unidirectional in the direction of Omniverse. In order to enable Live
Editing, click the *Live Sync Mode* button in the middle of the
Omniverse Toolbar. This will open a dialog:
![Live Sync Dialog](resources/sketchup_live_sync.png)
Once the dialog is open, ensure that the *Live Sync* checkbox is checked
and Live Editing will be enabled. Once you make changes they will be
automatically shared with Omniverse.
**WARNING:** Do not close the Omniverse Live Sync dialog box or click the *Connect
USD* button. Doing so will both clear the link you currently have with
Nucleus for the file, and will end the Live Sync session. We have
confirmed with Nvidia that this is intended behavior.
References
==========
- [Official Nvidia Omniverse
Documentation](https://docs.omniverse.nvidia.com/con_connect/con_connect/sketchup.html)
| 5,515 | Markdown | 37.305555 | 112 | 0.75485 |
CesiumGS/cesium-omniverse/docs/kit/README.md | # So you want to programmatically change a setting in Omniverse?
An optional first step is to dump the current settings to a file so you can inspect them. The easiest way to do this is to write them out when one of our functions in `window.py` is called. This snippet will help:
```python
import json
import carb.settings
with open("<path to desired dump file>", "w") as fh:
fh.write(json.dumps(carb.settings.get_settings().get("/")))
```
Having these settings isn't required but it may be helpful. Once pretty printed using the JSON formatter of your choice, it can help you find file paths to help in your search, and you can take a closer look at all of the current settings.
In the case of this ticket, we needed to set a setting for the RTX renderer. A quick search of the `tokens` object gives us this path:
```
c:/users/amorris/appdata/local/ov/pkg/code-2022.2.0/kit/exts/omni.rtx.settings.core
```
Perform a grep in this folder for the menu item you wish to configure programmatically. In this case, I searched for `Normal & Tangent Space Generation Mode`. That should direct you to the file where the widget is available, and you should find the following:
```python
tbnMode = ["AUTO", "CPU", "GPU", "Force GPU"]
self._add_setting_combo("Normal & Tangent Space Generation Mode", "/rtx/hydra/TBNFrameMode", tbnMode)
```
The most important piece here is the path `/rtx/hydra/TBNFrameMode`. This refers to the path in the settings. Once you have this, programmatically changing the setting is simple:
```python
import carb.settings
carb.settings.get_settings().set("/rtx/hydra/TBNFrameMode", 1)
```
If you are unsure about what the type of the value for the setting should be, I suggest checking the JSON dump of the settings. The path `/rtx/hydra/TBNFrameMode` refers to, from root, the rtx object, the child hydra object, and followed by the TBNFrameMode property within. You can also search for the property, but beware that there may be multiple that are unrelated. For example, `TBNFrameMode` has three results total, but only one is relevant to our needs. | 2,038 | Markdown | 58.970587 | 462 | 0.760059 |
CesiumGS/cesium-omniverse/extern/nvidia/deps/target-deps.packman.xml | <project toolsVersion="5.6">
<!-- Import dependencies from Kit SDK to ensure we're using the same versions. -->
<import path="../_build/target-deps/kit-sdk/dev/all-deps.packman.xml">
<filter include="python"/>
<filter include="nv_usd_py310_release"/>
<filter include="usdrt"/>
<filter include="carb_sdk_plugins"/>
<filter include="pybind11"/>
<filter include="cuda"/>
</import>
<!-- Override the link paths to point to the correct locations. -->
<dependency name="python" linkPath="../_build/target-deps/python"/>
<dependency name="nv_usd_py310_release" linkPath="../_build/target-deps/usd/release"/>
<dependency name="usdrt" linkPath="../_build/target-deps/usdrt"/>
<dependency name="carb_sdk_plugins" linkPath="../_build/target-deps/carb_sdk_plugins"/>
<dependency name="pybind11" linkPath="../_build/target-deps/pybind11/pybind11"/>
<dependency name="cuda" linkPath="../_build/target-deps/cuda/cuda"/>
</project>
| 960 | XML | 49.578945 | 89 | 0.6875 |
CesiumGS/cesium-omniverse/extern/nvidia/deps/kit-sdk.packman.xml | <project toolsVersion="5.6">
<dependency name="kit_sdk" linkPath="../_build/target-deps/kit-sdk/">
<package name="kit-sdk" version="105.1.2+release.135279.09b309e7.tc.${platform}.release"/>
</dependency>
</project>
| 223 | XML | 36.333327 | 94 | 0.690583 |
CesiumGS/cesium-omniverse/extern/nvidia/scripts/install.py | import os
import packmanapi
import sys
# Resolve all paths relative to this script's location so the install
# works regardless of the caller's current working directory.
REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
# Packman dependency manifests to pull below.
KIT_SDK_FILE = os.path.join(REPO_ROOT, "deps/kit-sdk.packman.xml")
TARGET_DEPS_FILE = os.path.join(REPO_ROOT, "deps/target-deps.packman.xml")
if __name__ == "__main__":
    # NOTE(review): reads the target platform from the third CLI token
    # (argv[2]) — presumably the tooling invokes this as
    # `install.py <arg> <platform>`; confirm against the build scripts.
    platform = sys.argv[2]
    # Download and link the Kit SDK, then the target dependencies, for
    # the requested platform.
    packmanapi.pull(KIT_SDK_FILE, platform=platform)
    packmanapi.pull(TARGET_DEPS_FILE, platform=platform)
| 424 | Python | 29.357141 | 75 | 0.698113 |
CesiumGS/cesium-omniverse/extern/nvidia/debug-deps/target-deps.packman.user.xml | <project toolsVersion="5.6">
<import path="../_build/target-deps/kit-sdk-debug/dev/all-deps.packman.xml">
<filter include="nv_usd_py310_debug"/>
</import>
<dependency name="nv_usd_py310_debug" linkPath="../_build/target-deps/usd/debug"/>
</project>
| 259 | XML | 36.142852 | 84 | 0.687259 |
CesiumGS/cesium-omniverse/extern/nvidia/debug-deps/kit-sdk.packman.user.xml | <project toolsVersion="5.6">
<dependency name="kit_sdk_debug" linkPath="../_build/target-deps/kit-sdk-debug/">
<package name="kit-sdk" version="105.1.2+release.135279.09b309e7.tc.${platform}.debug"/>
</dependency>
</project>
| 233 | XML | 37.999994 | 92 | 0.695279 |
CesiumGS/cesium-omniverse-samples/CHANGES.md | ### v0.19.0 - 2024-04-01
- Updates for Cesium for Omniverse v0.19.0.
### v0.18.0 - 2024-03-01
- Added samples for Raster Overlays.
- Updates for Cesium for Omniverse v0.18.0.
### v0.17.0 - 2024-02-01
- Added project files for Tileset Clipping tutorial.
- Updates for Cesium for Omniverse v0.17.0.
### v0.16.0 - 2024-01-02
- Added project files for Placing Objects on the Globe tutorial.
- Added project files for Style by Properties tutorial.
- Updates for Cesium for Omniverse v0.16.0.
### v0.15.0 - 2023-12-14
- Updates for Cesium for Omniverse v0.15.0.
### v0.14.0 - 2023-12-01
- Updates for Cesium for Omniverse v0.14.0.
### v0.13.0 - 2023-11-01
- Fixed Google Photorealistic 3D Tiles tutorial sample.
- Updates for Cesium for Omniverse v0.13.0.
### v0.12.0 - 2023-10-25
- Changed Google Photorealistic 3D Tiles samples to go through Cesium ion.
- Added samples for Globe Anchors.
- Added samples for Tileset Clipping.
- Added samples for Tileset Materials.
- Updates for Cesium for Omniverse v0.12.0.
### v0.11.0 - 2023-10-02
- Updates for Cesium for Omniverse v0.11.0.
### v0.10.0 - 2023-09-01
- Updates for Cesium for Omniverse v0.10.0.
### v0.9.0 - 2023-08-01
- Added project files for dynamic skies and sun study tutorial.
- Updates for Cesium for Omniverse v0.9.0.
### v0.8.0 - 2023-07-03
- Updates for Cesium for Omniverse v0.8.0.
### v0.7.0 - 2023-06-01
- Switched to RTX Real-Time renderer for Google 3D Tiles examples.
- Updates for Cesium for Omniverse v0.7.0.
### v0.6.0 - 2023-05-10
- Added samples to showcase Photorealistic 3D Tiles via Google Maps Platform.
- Updates for Cesium for Omniverse v0.6.0.
### v0.5.0 - 2023-05-01
- Updates for Cesium for Omniverse v0.5.0.
### v0.4.0 - 2023-04-03
- Updates for Cesium for Omniverse v0.4.0.
### v0.3.0 - 2023-03-20
- Initial release.
| 1,831 | Markdown | 21.9 | 77 | 0.700164 |
CesiumGS/cesium-omniverse-samples/README.md | [![Cesium for Omniverse Logo](images/Cesium_Omniverse_dark_color.png)](https://cesium.com/)
# Cesium for Omniverse Samples
The Cesium for Omniverse Samples contains a series of USD files to help learn and explore the [Cesium for Omniverse](https://cesium.com/platform/cesium-for-omniverse) extension.
The USDs in this project will walk you through the extension's features and demonstrate global-scale content and experiences in Nvidia Omniverse USD Composer.
The source code for Cesium for Omniverse itself may be found in the [cesium-omniverse](https://github.com/CesiumGS/cesium-omniverse) repo.
![Aerometrex Photogrammetry of San Francisco in Cesium for Omniverse](images/san_francisco.jpg)
*<p align="center">Photogrammetry of San Francisco, California visualized in Omniverse USD Composer, using Cesium for Omniverse.<br>Open <b>examples/SanFrancisco/SanFrancisco.usd</b> in Omniverse USD Composer to experience it yourself!</p>*
### :rocket: Get Started
1. **[Download Cesium for Omniverse Samples](https://github.com/CesiumGS/cesium-omniverse-samples/releases/latest)**.
2. Extract the `.zip` file into a suitable location on your computer.
3. Follow the Quickstart tutorial to setup Cesium for Omniverse with Omniverse USD Composer.
4. Open any of the USD files within this repo to explore them.
Have questions? Ask them on the [community forum](https://community.cesium.com).
## :mountain: USD Descriptions
The content in this repo is split into two main folders - Examples and Tutorials.
### :one: Examples Folder
The examples folder contains cities built with various datasets, high-quality lighting, and rendering settings optimised for real-time interaction, while also providing high-quality image and video outputs.
#### Denver
In Denver you'll see [Cesium World Terrain](https://cesium.com/platform/cesium-ion/content/cesium-world-terrain/) combined with photogrammetry of the city center, captured by [Aerometrex](https://aerometrex.com.au/).
#### San Francisco
In San Francisco you'll see [Cesium World Terrain](https://cesium.com/platform/cesium-ion/content/cesium-world-terrain/) combined with photogrammetry of the city, captured by [Aerometrex](https://aerometrex.com.au/).
#### Vancouver
In Vancouver you'll see [Cesium World Terrain](https://cesium.com/platform/cesium-ion/content/cesium-world-terrain/) combined with [Cesium OSM Buildings](https://cesium.com/platform/cesium-ion/content/cesium-osm-buildings/).
### :two: Tutorials Folder
The tutorial folder contain USD's representing the completed steps of each tutorial found [here](https://cesium.com/learn/omniverse/).
If you want to see the intended outcome of each tutorial, simply open the corresponding USD.
### :green_book: License
[Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). Cesium for Omniverse Samples is free to use as starter project for both commercial and non-commercial use.
| 2,907 | Markdown | 58.346938 | 240 | 0.783626 |
boredengineering/Robots_for_Omniverse/README.md | # Robots_for_Omniverse
The objective of this project is to make developing robotics an engaging and exciting experience.<br/>
> OriginalAuthor:<br/>
> Renan Monteiro Barbosa<br/>
Please feel free to contribute with either robots in openUSD or URDF descriptions that can be converted.<br/>
## openUSD_assets
List of robots converted to openUSD.<br/>
### Quadrupeds
- [Boston Dynamics](https://www.bostondynamics.com/)
- [Spot](https://github.com/chvmp/spot_ros)
- [SpotMicroAI](https://spotmicroai.readthedocs.io/en/latest/)
### Bipedal
- [Agility Robotics](https://agilityrobotics.com/)
- [Digit](https://github.com/adubredu/DigitRobot.jl)
- [Unitree Robotics](https://www.unitree.com/h1/)
- [NJIT - TOCABI](https://github.com/cadop/tocabi)
## URDF_descriptions
It contains all the robot descriptions in URDF.<br/>
Below is the list of all the sources where the URDFs where obtained from.<br/>
### Quadrupeds
- [kodlab_gazebo - Ghost Robotics](https://github.com/KodlabPenn/kodlab_gazebo)
- [ANYbotics](https://github.com/ANYbotics)
- [ANYbotics' ANYmal B](https://github.com/ANYbotics/anymal_b_simple_description)
- [ANYbotics' ANYmal B - Modified for CHAMP](https://github.com/chvmp/anymal_b_simple_description)
- [ANYbotics' ANYmal C](https://github.com/ANYbotics/anymal_c_simple_description)
- [ANYbotics' ANYmal B - Modified for CHAMP](https://github.com/chvmp/anymal_c_simple_description)
- **Boston Dynamic's Little Dog**
- [Boston Dynamic's Little Dog - by RobotLocomotion](https://github.com/RobotLocomotion/LittleDog)
- [Boston Dynamic's Little Dog - Modified for CHAMP](https://github.com/chvmp/littledog_description)
- **Boston Dynamic's Spot**
- [Boston Dynamic's Spot - by heuristicus](https://github.com/heuristicus/spot_ros)
- [Boston Dynamic's Spot - Modified for CHAMP](https://github.com/chvmp/spot_ros)
- [Dream Walker](https://github.com/Ohaginia/dream_walker)
- [MIT Mini Cheetah - Original](https://github.com/HitSZwang/mini-cheetah-gazebo-urdf)
- [MIT Mini Cheetah - Modified for CHAMP](https://github.com/chvmp/mini-cheetah-gazebo-urdf)
- [OpenDog V2 - Original](https://github.com/XRobots/openDogV2)
- [OpenDog V2 - Modified for CHAMP](https://github.com/chvmp/opendog_description)
- **Open Quadruped**
- [Open Quadruped](https://github.com/moribots/spot_mini_mini)
- [SpotMicroAI - Gitlab](https://gitlab.com/custom_robots/spotmicroai)
- [Spot Micro](https://github.com/chvmp/spotmicro_description)
- [Unitree Robotics All](https://github.com/unitreerobotics/unitree_ros)
- [Unitree Robotics' Youtube](https://www.youtube.com/@unitreerobotics7482)
- [Unitree Robotics All - Modified for CHAMP](https://github.com/chvmp/unitree_ros)
- [Unitree Robotics' A1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/a1_description)
- [Unitree Robotics' AliengoZ1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/aliengoZ1_description)
- [Unitree Robotics'Aliengo](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/aliengo_description)
- [Unitree Robotics' B1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/b1_description)
- [Unitree Robotics' Go1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/go1_description)
- [Unitree Robotics' Laikago](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/laikago_description)
- [Unitree Robotics' Z1](https://github.com/unitreerobotics/unitree_ros/tree/master/robots/z1_description)
- [Stochlab's Stochlite](https://stochlab.github.io/)
- [Stochlab's Stochlite - Modified by aditya-shirwatkar](https://github.com/aditya-shirwatkar/stochlite_description)
- **Mini Pupper**
- [MangDang's Mini Pupper](https://github.com/mangdangroboticsclub/QuadrupedRobot)
- [simplified robot description of the MangDang's Mini Pupper](https://github.com/nisshan-x/mini_pupper_description)
- [Stanford pupper - Original](https://stanfordstudentrobotics.org/pupper)
- [Stanford pupper - Modified by Chandykunju Alex](https://github.com/chandyalex/stanford_pupper_description.git)
### Bipedal
- [Agility Robotics' Cassie - UMich-BipedLab](https://github.com/UMich-BipedLab/cassie_description)
- [Agility Robotics' Digit - DigitRobot.jl](https://github.com/adubredu/DigitRobot.jl)
- [NJIT - TOCABI](https://github.com/cadop/tocabi)
- [Unitree H1](https://github.com/google-deepmind/mujoco_menagerie/tree/main/unitree_h1)
### Manipulation
- [GoogleAI ROBEL D'Kitty](https://github.com/google-research/robel-scenes)
- [GoogleAI ROBEL D'Kitty - Modified for CHAMP](https://github.com/chvmp/dkitty_description)
- [The Shadow Robot Company](https://github.com/shadow-robot)
- [Shadow Hand - archived](https://github.com/AndrejOrsula/shadow_hand_ign)
# Appendix
## Notes<br/>
- NJIT-TOCABI has a high-poly and a low-poly version; this repo has the low-poly version [light_weight](https://github.com/cadop/tocabi/tree/main/light_weight).<br/>
- Dream Walker usd files are too large. Could not commit instanceable_meshes.usd<br/>
RobotEra TECHNOLOGY CO.,LTD.
Founded in 2023, RobotEra TECHNOLOGY CO., LTD focuses on the R&D of embodied AI general-purpose humanoid robots.
https://github.com/roboterax
| 5,263 | Markdown | 61.666666 | 163 | 0.746152 |
boredengineering/Robots_for_Omniverse/URDF_descriptions/openDogV2/README.md | # openDogV2
> Original Author: <br/>
> James Bruton <br/>
> Xrobots<br/>
> Modified by:
> Renan Monteiro Barbosa<br/>
Purpose: Adapt this opensource quadruped robot project for the standards of the Isaac-Sim simulator.<br/>
>Sources:<br/>
>- [OpenDog V2 - Original](https://github.com/XRobots/openDogV2)<br/>
>- [OpenDog V2 - Modified for CHAMP](https://github.com/chvmp/opendog_description)<br/>
> CAD and Code that relates to this YouTube series:<br/>
> https://www.youtube.com/playlist?list=PLpwJoq86vov9CcmrLGyM2XyyYDAYG0-Iu
- **Release 1:** created at the end of part 6 of the YouTube series. Please note the issues stated at the end of this video.<br/>
- **Release 2:** created at the end of part 7 of the YouTube series. Please note the issues stated during this video. Note that the remote is unchanged since release 1.<br/>
- **Relase 3:** created for part 8 of the YouTube series. Includes the modified knee motor pulley, Python and Arduino code for the deep learning model.<br/>
## Related Community Projects:
OpenDog URDF/config for CHAMP: https://github.com/chvmp/opendog_description
'openDog 2.1' with higher belt reductions and cooling fans: https://github.com/J-DIndustries/openDog-V2.1
# Modified for the OpenUSD format
Import on Isaac-Sim | 1,272 | Markdown | 42.89655 | 173 | 0.745283 |
boredengineering/Robots_for_Omniverse/URDF_descriptions/DigitRobot/README.md | # Digit
Manufacturer: [Agility Robotics](https://agilityrobotics.com/)<br/>
> Source:<br/>
> - [DigitRobot.jl](https://github.com/adubredu/DigitRobot.jl)<br/>
| 160 | Markdown | 25.833329 | 67 | 0.70625 |
boredengineering/Robots_for_Omniverse/URDF_descriptions/UnitreeRobotics/unitree_h1/scene.xml | <mujoco model="h1 scene">
<include file="h1.xml"/>
<statistic center="0 0 1" extent="1.8"/>
<visual>
<headlight diffuse="0.6 0.6 0.6" ambient="0.3 0.3 0.3" specular="0 0 0"/>
<rgba haze="0.15 0.25 0.35 1"/>
<global azimuth="160" elevation="-20"/>
</visual>
<asset>
<texture type="skybox" builtin="gradient" rgb1="0.3 0.5 0.7" rgb2="0 0 0" width="512" height="3072"/>
<texture type="2d" name="groundplane" builtin="checker" mark="edge" rgb1="0.2 0.3 0.4" rgb2="0.1 0.2 0.3"
markrgb="0.8 0.8 0.8" width="300" height="300"/>
<material name="groundplane" texture="groundplane" texuniform="true" texrepeat="5 5" reflectance="0.2"/>
</asset>
<worldbody>
<light pos="0 0 3.5" dir="0 0 -1" directional="true"/>
<geom name="floor" size="0 0 0.05" type="plane" material="groundplane"/>
</worldbody>
<keyframe>
<key name="home"
qpos="
0 0 0.98
1 0 0 0
0 0 -0.4 0.8 -0.4
0 0 -0.4 0.8 -0.4
0
0 0 0 0
0 0 0 0"/>
</keyframe>
</mujoco>
| 1,021 | XML | 27.388888 | 109 | 0.578844 |
boredengineering/Robots_for_Omniverse/URDF_descriptions/UnitreeRobotics/unitree_h1/README.md | # Unitree H1 Description (MJCF)
Requires MuJoCo 2.2.2 or later.
## Overview
This package contains a simplified robot description (MJCF) of the [H1 Humanoid
Robot](https://www.unitree.com/h1/) developed by [Unitree
Robotics](https://www.unitree.com/). The original URDF and assets were provided
directly by [Unitree Robotics](https://www.unitree.com/) under a [BSD-3-Clause
License](LICENSE).
<p float="left">
<img src="h1.png" width="400">
</p>
## URDF → MJCF derivation steps
1. Added `<mujoco> <compiler discardvisual="false" strippath="false" fusestatic="false"/> </mujoco>` to the URDF's
`<robot>` clause in order to preserve visual geometries.
2. Loaded the URDF into MuJoCo and saved a corresponding MJCF.
3. Manually edited the MJCF to extract common properties into the `<default>` section.
4. Added actuators.
5. Added `scene.xml` which includes the robot, with a textured groundplane, skybox, and haze.
## License
This model is released under a [BSD-3-Clause License](LICENSE).
| 1,001 | Markdown | 33.551723 | 114 | 0.739261 |
boredengineering/Robots_for_Omniverse/URDF_descriptions/UnitreeRobotics/unitree_h1/h1.xml | <mujoco model="h1">
<compiler angle="radian" meshdir="assets" autolimits="true"/>
<statistic meansize="0.05"/>
<default>
<default class="h1">
<joint damping="1" armature="0.1"/>
<default class="visual">
<geom type="mesh" contype="0" conaffinity="0" group="2" material="black"/>
</default>
<default class="collision">
<geom group="3" mass="0" density="0"/>
<default class="foot">
<geom type="capsule" size=".014"/>
<default class="foot1">
<geom fromto="-.035 0 -0.056 .02 0 -0.045"/>
</default>
<default class="foot2">
<geom fromto=".02 0 -0.045 .115 0 -0.056"/>
</default>
<default class="foot3">
<geom fromto=".14 -.03 -0.056 .14 .03 -0.056"/>
</default>
</default>
</default>
<site size="0.001" rgba="0.5 0.5 0.5 0.3" group="4"/>
</default>
</default>
<asset>
<material name="black" rgba="0.1 0.1 0.1 1"/>
<material name="white" rgba="1 1 1 1"/>
<mesh file="pelvis.stl"/>
<mesh file="left_hip_yaw_link.stl"/>
<mesh file="left_hip_roll_link.stl"/>
<mesh file="left_hip_pitch_link.stl"/>
<mesh file="left_knee_link.stl"/>
<mesh file="left_ankle_link.stl"/>
<mesh file="right_hip_yaw_link.stl"/>
<mesh file="right_hip_roll_link.stl"/>
<mesh file="right_hip_pitch_link.stl"/>
<mesh file="right_knee_link.stl"/>
<mesh file="right_ankle_link.stl"/>
<mesh file="torso_link.stl"/>
<mesh file="left_shoulder_pitch_link.stl"/>
<mesh file="left_shoulder_roll_link.stl"/>
<mesh file="left_shoulder_yaw_link.stl"/>
<mesh file="left_elbow_link.stl"/>
<mesh file="right_shoulder_pitch_link.stl"/>
<mesh file="right_shoulder_roll_link.stl"/>
<mesh file="right_shoulder_yaw_link.stl"/>
<mesh file="right_elbow_link.stl"/>
<mesh file="logo_link.stl"/>
</asset>
<worldbody>
<light mode="targetbodycom" target="torso_link" pos="2 0 2.5"/>
<body name="pelvis" pos="0 0 1.06" childclass="h1">
<inertial pos="-0.0002 4e-05 -0.04522" quat="0.498303 0.499454 -0.500496 0.501741" mass="5.39"
diaginertia="0.0490211 0.0445821 0.00824619"/>
<freejoint/>
<geom class="visual" mesh="pelvis"/>
<body name="left_hip_yaw_link" pos="0 0.0875 -0.1742">
<inertial pos="-0.04923 0.0001 0.0072" quat="0.69699 0.219193 0.233287 0.641667" mass="2.244"
diaginertia="0.00304494 0.00296885 0.00189201"/>
<joint name="left_hip_yaw" axis="0 0 1" range="-0.43 0.43"/>
<geom class="visual" mesh="left_hip_yaw_link"/>
<geom size="0.06 0.035" pos="-0.067 0 0" quat="0.707123 0 0.70709 0" type="cylinder" class="collision"/>
<body name="left_hip_roll_link" pos="0.039468 0 0">
<inertial pos="-0.0058 -0.00319 -9e-05" quat="0.0438242 0.70721 -0.0729075 0.701867" mass="2.232"
diaginertia="0.00243264 0.00225325 0.00205492"/>
<joint name="left_hip_roll" axis="1 0 0" range="-0.43 0.43"/>
<geom class="visual" mesh="left_hip_roll_link"/>
<geom class="collision" type="cylinder" size="0.05 0.03" quat="1 1 0 0" pos="0 -0.02 0"/>
<body name="left_hip_pitch_link" pos="0 0.11536 0">
<inertial pos="0.00746 -0.02346 -0.08193" quat="0.979828 0.0513522 -0.0169854 -0.192382" mass="4.152"
diaginertia="0.0829503 0.0821457 0.00510909"/>
<joint name="left_hip_pitch" axis="0 1 0" range="-1.57 1.57"/>
<geom class="visual" mesh="left_hip_pitch_link"/>
<geom class="collision" type="capsule" size="0.03" fromto="0.02 0 -0.4 -0.02 0 0.02"/>
<geom class="collision" type="capsule" size="0.03" fromto="0.02 0 -0.4 0.02 0 0.02"/>
<geom class="collision" type="cylinder" size="0.05 0.02" quat="1 1 0 0" pos="0 -0.07 0"/>
<body name="left_knee_link" pos="0 0 -0.4">
<inertial pos="-0.00136 -0.00512 -0.1384" quat="0.626132 -0.034227 -0.0416277 0.777852" mass="1.721"
diaginertia="0.0125237 0.0123104 0.0019428"/>
<joint name="left_knee" axis="0 1 0" range="-0.26 2.05"/>
<geom class="visual" mesh="left_knee_link"/>
<geom class="collision" type="capsule" size="0.025" fromto="0.02 0 -0.4 0.02 0 0"/>
<geom class="collision" type="sphere" size="0.05" pos="0 0 -0.115"/>
<body name="left_ankle_link" pos="0 0 -0.4">
<inertial pos="0.06722 0.00015 -0.04497" quat="0.489101 0.503197 0.565782 0.432972" mass="0.446"
diaginertia="0.00220848 0.00218961 0.000214202"/>
<joint name="left_ankle" axis="0 1 0" range="-0.87 0.52"/>
<geom class="visual" mesh="left_ankle_link"/>
<geom class="foot1"/>
<geom class="foot2"/>
<geom class="foot3"/>
</body>
</body>
</body>
</body>
</body>
<body name="right_hip_yaw_link" pos="0 -0.0875 -0.1742">
<inertial pos="-0.04923 -0.0001 0.0072" quat="0.641667 0.233287 0.219193 0.69699" mass="2.244"
diaginertia="0.00304494 0.00296885 0.00189201"/>
<joint name="right_hip_yaw" axis="0 0 1" range="-0.43 0.43"/>
<geom class="visual" mesh="right_hip_yaw_link"/>
<geom size="0.06 0.035" pos="-0.067 0 0" quat="0.707123 0 0.70709 0" type="cylinder" class="collision"/>
<body name="right_hip_roll_link" pos="0.039468 0 0">
<inertial pos="-0.0058 0.00319 -9e-05" quat="-0.0438242 0.70721 0.0729075 0.701867" mass="2.232"
diaginertia="0.00243264 0.00225325 0.00205492"/>
<joint name="right_hip_roll" axis="1 0 0" range="-0.43 0.43"/>
<geom class="visual" mesh="right_hip_roll_link"/>
<geom class="collision" type="cylinder" size="0.05 0.03" quat="1 1 0 0" pos="0 0.02 0"/>
<body name="right_hip_pitch_link" pos="0 -0.11536 0">
<inertial pos="0.00746 0.02346 -0.08193" quat="0.979828 -0.0513522 -0.0169854 0.192382" mass="4.152"
diaginertia="0.0829503 0.0821457 0.00510909"/>
<joint name="right_hip_pitch" axis="0 1 0" range="-1.57 1.57"/>
<geom class="visual" mesh="right_hip_pitch_link"/>
<geom class="collision" type="capsule" size="0.03" fromto="0.02 0 -0.4 -0.02 0 0.02"/>
<geom class="collision" type="capsule" size="0.03" fromto="0.02 0 -0.4 0.02 0 0.02"/>
<geom class="collision" type="cylinder" size="0.05 0.02" quat="1 1 0 0" pos="0 0.07 0"/>
<body name="right_knee_link" pos="0 0 -0.4">
<inertial pos="-0.00136 0.00512 -0.1384" quat="0.777852 -0.0416277 -0.034227 0.626132" mass="1.721"
diaginertia="0.0125237 0.0123104 0.0019428"/>
<joint name="right_knee" axis="0 1 0" range="-0.26 2.05"/>
<geom class="visual" mesh="right_knee_link"/>
<geom class="collision" type="capsule" size="0.025" fromto="0.02 0 -0.4 0.02 0 0"/>
<geom class="collision" type="sphere" size="0.05" pos="0 0 -0.115"/>
<body name="right_ankle_link" pos="0 0 -0.4">
<inertial pos="0.06722 -0.00015 -0.04497" quat="0.432972 0.565782 0.503197 0.489101" mass="0.446"
diaginertia="0.00220848 0.00218961 0.000214202"/>
<joint name="right_ankle" axis="0 1 0" range="-0.87 0.52"/>
<geom class="visual" mesh="right_ankle_link"/>
<geom class="foot1"/>
<geom class="foot2"/>
<geom class="foot3"/>
</body>
</body>
</body>
</body>
</body>
<body name="torso_link">
<inertial pos="0.000489 0.002797 0.20484" quat="0.999989 -0.00130808 -0.00282289 -0.00349105" mass="17.789"
diaginertia="0.487315 0.409628 0.127837"/>
<joint name="torso" axis="0 0 1" range="-2.35 2.35"/>
<geom class="visual" mesh="torso_link"/>
<geom class="visual" material="white" mesh="logo_link"/>
<geom name="head" class="collision" type="capsule" size="0.06" fromto="0.05 0 0.68 0.05 0 0.6"/>
<geom name="helmet" class="collision" type="sphere" size="0.073" pos="0.045 0 0.68"/>
<geom name="torso" class="collision" type="box" size="0.07 0.1 0.22" pos="0 0 0.25"/>
<geom name="hip" class="collision" type="capsule" size="0.05" fromto="0 -0.1 -0.05 0 0.1 -0.05"/>
<site name="imu" pos="-0.04452 -0.01891 0.27756"/>
<body name="left_shoulder_pitch_link" pos="0.0055 0.15535 0.42999" quat="0.976296 0.216438 0 0">
<inertial pos="0.005045 0.053657 -0.015715" quat="0.814858 0.579236 -0.0201072 -0.00936488" mass="1.033"
diaginertia="0.00129936 0.000987113 0.000858198"/>
<joint name="left_shoulder_pitch" axis="0 1 0" range="-2.87 2.87"/>
<geom class="visual" mesh="left_shoulder_pitch_link"/>
<body name="left_shoulder_roll_link" pos="-0.0055 0.0565 -0.0165" quat="0.976296 -0.216438 0 0">
<inertial pos="0.000679 0.00115 -0.094076" quat="0.732491 0.00917179 0.0766656 0.676384" mass="0.793"
diaginertia="0.00170388 0.00158256 0.00100336"/>
<joint name="left_shoulder_roll" axis="1 0 0" range="-0.34 3.11"/>
<geom class="visual" mesh="left_shoulder_roll_link"/>
<geom name="left_shoulder" class="collision" type="capsule" size="0.04" fromto="0 0.01 0.008 0 -0.07 -0.02"/>
<body name="left_shoulder_yaw_link" pos="0 0 -0.1343">
<inertial pos="0.01365 0.002767 -0.16266" quat="0.703042 -0.0331229 -0.0473362 0.708798" mass="0.839"
diaginertia="0.00408038 0.00370367 0.000622687"/>
<joint name="left_shoulder_yaw" axis="0 0 1" range="-1.3 4.45"/>
<geom class="visual" mesh="left_shoulder_yaw_link"/>
<geom class="collision" type="capsule" size="0.03" fromto="0 0 0.15 0 0 -0.2"/>
<body name="left_elbow_link" pos="0.0185 0 -0.198">
<inertial pos="0.15908 -0.000144 -0.015776" quat="0.0765232 0.720327 0.0853116 0.684102" mass="0.669"
diaginertia="0.00601829 0.00600579 0.000408305"/>
<joint name="left_elbow" axis="0 1 0" range="-1.25 2.61"/>
<geom class="visual" mesh="left_elbow_link"/>
<geom class="collision" type="capsule" size="0.025" fromto="0 0 0 0.28 0 -0.015"/>
<geom class="collision" type="sphere" size="0.033" pos="0.28 0 -0.015"/>
</body>
</body>
</body>
</body>
<body name="right_shoulder_pitch_link" pos="0.0055 -0.15535 0.42999" quat="0.976296 -0.216438 0 0">
<inertial pos="0.005045 -0.053657 -0.015715" quat="0.579236 0.814858 0.00936488 0.0201072" mass="1.033"
diaginertia="0.00129936 0.000987113 0.000858198"/>
<joint name="right_shoulder_pitch" axis="0 1 0" range="-2.87 2.87"/>
<geom class="visual" mesh="right_shoulder_pitch_link"/>
<body name="right_shoulder_roll_link" pos="-0.0055 -0.0565 -0.0165" quat="0.976296 0.216438 0 0">
<inertial pos="0.000679 -0.00115 -0.094076" quat="0.676384 0.0766656 0.00917179 0.732491" mass="0.793"
diaginertia="0.00170388 0.00158256 0.00100336"/>
<joint name="right_shoulder_roll" axis="1 0 0" range="-3.11 0.34"/>
<geom class="visual" mesh="right_shoulder_roll_link"/>
<geom name="right_shoulder" class="collision" type="capsule" size="0.04" fromto="0 -0.01 0.008 0 0.07 -0.02"/>
<body name="right_shoulder_yaw_link" pos="0 0 -0.1343">
<inertial pos="0.01365 -0.002767 -0.16266" quat="0.708798 -0.0473362 -0.0331229 0.703042" mass="0.839"
diaginertia="0.00408038 0.00370367 0.000622687"/>
<joint name="right_shoulder_yaw" axis="0 0 1" range="-4.45 1.3"/>
<geom class="visual" mesh="right_shoulder_yaw_link"/>
<geom class="collision" type="capsule" size="0.03" fromto="0 0 0.15 0 0 -0.2"/>
<body name="right_elbow_link" pos="0.0185 0 -0.198">
<inertial pos="0.15908 0.000144 -0.015776" quat="-0.0765232 0.720327 -0.0853116 0.684102" mass="0.669"
diaginertia="0.00601829 0.00600579 0.000408305"/>
<joint name="right_elbow" axis="0 1 0" range="-1.25 2.61"/>
<geom class="visual" mesh="right_elbow_link"/>
<geom class="collision" type="capsule" size="0.025" fromto="0 0 0 0.28 0 -0.015"/>
<geom class="collision" type="sphere" size="0.033" pos="0.28 0 -0.015"/>
</body>
</body>
</body>
</body>
</body>
</body>
</worldbody>
<contact>
<exclude body1="torso_link" body2="left_shoulder_roll_link"/>
<exclude body1="torso_link" body2="right_shoulder_roll_link"/>
</contact>
<actuator>
<motor class="h1" name="left_hip_yaw" joint="left_hip_yaw" ctrlrange="-200 200"/>
<motor class="h1" name="left_hip_roll" joint="left_hip_roll" ctrlrange="-200 200"/>
<motor class="h1" name="left_hip_pitch" joint="left_hip_pitch" ctrlrange="-200 200"/>
<motor class="h1" name="left_knee" joint="left_knee" ctrlrange="-300 300"/>
<motor class="h1" name="left_ankle" joint="left_ankle" ctrlrange="-40 40"/>
<motor class="h1" name="right_hip_yaw" joint="right_hip_yaw" ctrlrange="-200 200"/>
<motor class="h1" name="right_hip_roll" joint="right_hip_roll" ctrlrange="-200 200"/>
<motor class="h1" name="right_hip_pitch" joint="right_hip_pitch" ctrlrange="-200 200"/>
<motor class="h1" name="right_knee" joint="right_knee" ctrlrange="-300 300"/>
<motor class="h1" name="right_ankle" joint="right_ankle" ctrlrange="-40 40"/>
<motor class="h1" name="torso" joint="torso" ctrlrange="-200 200"/>
<motor class="h1" name="left_shoulder_pitch" joint="left_shoulder_pitch" ctrlrange="-40 40"/>
<motor class="h1" name="left_shoulder_roll" joint="left_shoulder_roll" ctrlrange="-40 40"/>
<motor class="h1" name="left_shoulder_yaw" joint="left_shoulder_yaw" ctrlrange="-18 18"/>
<motor class="h1" name="left_elbow" joint="left_elbow" ctrlrange="-18 18"/>
<motor class="h1" name="right_shoulder_pitch" joint="right_shoulder_pitch" ctrlrange="-40 40"/>
<motor class="h1" name="right_shoulder_roll" joint="right_shoulder_roll" ctrlrange="-40 40"/>
<motor class="h1" name="right_shoulder_yaw" joint="right_shoulder_yaw" ctrlrange="-18 18"/>
<motor class="h1" name="right_elbow" joint="right_elbow" ctrlrange="-18 18"/>
</actuator>
</mujoco>
| 14,745 | XML | 59.683127 | 122 | 0.577077 |
boredengineering/Robots_for_Omniverse/URDF_descriptions/MIT_mini-cheetah/README.md | # MIT Mini Cheetah
An urdf description file of a quadruped robot modeled on mini cheetah.
>Source: <br/>
>- YOBOTICS, INC.<br/>
>- [MIT Mini Cheetah - Original](https://github.com/HitSZwang/mini-cheetah-gazebo-urdf)<br/>
>- [MIT Mini Cheetah - Modified for CHAMP](https://github.com/chvmp/mini-cheetah-gazebo-urdf)<br/>
| 321 | Markdown | 39.249995 | 98 | 0.725857 |
boredengineering/Robots_for_Omniverse/URDF_descriptions/BostonDynamics/README.md | # Boston Dynamics Robots
https://www.bostondynamics.com/
## Little Dog
> Source:<br/>
> - [Boston Dynamic's Little Dog - by RobotLocomotion](https://github.com/RobotLocomotion/LittleDog)
> - [Boston Dynamic's Little Dog - Modified for CHAMP](https://github.com/chvmp/littledog_description)
## Spot
> Source:<br/>
> - [Boston Dynamic's Spot - by heuristicus](https://github.com/heuristicus/spot_ros)
> - [Boston Dynamic's Spot - Modified for CHAMP](https://github.com/chvmp/spot_ros) | 486 | Markdown | 33.785712 | 102 | 0.726337 |
XiaomingY/omni-ies-viewer/README.md | # IES Viewer Omniverse Extension
![](./exts/IESViewer/data/preview.png)
This extension displays the IES profile web for selected light objects. It is particularly useful for visualizing architectural lighting designs. The orientation of measured light distribution profiles can be quickly tested with visual feedback. IES files are resampled to be lightweight and consistent to render. [A video demo](https://drive.google.com/file/d/1DxvjVGT6ZlfukfuTvyBu3iXaHz8qvY5Q/view?usp=sharing)
This extension is developed based on the [omni.example.ui_scene.object_info](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-scene/tree/main/exts/omni.example.ui_scene.object_info)
Supported light type: sphere light, rectangular light, disk light and cylinder light.
Only Type C IES files are currently supported; Type C is also the most commonly used type for architectural lighting.
## Adding This Extension
To add a this extension to your Omniverse app:
1. Go to Extension Manager and turn on Viewport Utility extension
2. Add `git://github.com/XiaomingY/omni-ies-viewer.git?branch=main&dir=exts` to extension search path
3. Turn on IES Viewer Extension
| 1,150 | Markdown | 59.578944 | 407 | 0.806087 |
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/extension.py | import omni.ext
import omni.ui as ui
from omni.kit.viewport.utility import get_active_viewport_window
from .viewport_scene import ViewportSceneInfo
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class AimingToolExtension(omni.ext.IExt):
    """Entry point for the extension.

    Kit instantiates this class when the extension is enabled and drives its
    lifetime through ``on_startup``/``on_shutdown``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Created in on_startup, torn down in on_shutdown.
        self.viewport_scene = None

    def on_startup(self, ext_id):
        """Attach the scene overlay to the currently active viewport window.

        ``ext_id`` is the current extension id; it can be used with the
        extension manager to query additional information, like where this
        extension is located on the filesystem.
        """
        active_window = get_active_viewport_window()
        self.viewport_scene = ViewportSceneInfo(active_window, ext_id)

    def on_shutdown(self):
        """Release the viewport overlay, if one was ever created."""
        if self.viewport_scene is None:
            return
        self.viewport_scene.destroy()
        self.viewport_scene = None
| 1,023 | Python | 38.384614 | 119 | 0.7087 |
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/viewport_scene.py | from omni.ui import scene as sc
import omni.ui as ui
from .object_info_manipulator import ObjInfoManipulator
from .object_info_model import ObjInfoModel
class ViewportSceneInfo():
    """The Object Info Manipulator, placed into a Viewport.

    Owns a SceneView registered with the viewport so the manipulator
    receives projection/view updates; `destroy()` must be called to
    unregister it and drop references.
    """
    def __init__(self, viewport_window, ext_id) -> None:
        self.scene_view = None
        self.viewport_window = viewport_window
        # NEW: Create a unique frame for our SceneView
        with self.viewport_window.get_frame(ext_id):
            # Create a default SceneView (it has a default camera-model)
            self.scene_view = sc.SceneView()
            # Add the manipulator into the SceneView's scene
            with self.scene_view.scene:
                ObjInfoManipulator(model=ObjInfoModel())
            # Register the SceneView with the Viewport to get projection and view updates
            self.viewport_window.viewport_api.add_scene_view(self.scene_view)
    def __del__(self):
        # Best-effort cleanup; destroy() is safe to call more than once.
        self.destroy()
    def destroy(self):
        # No-op when startup never completed or destroy() already ran.
        if self.scene_view:
            # Empty the SceneView of any elements it may have
            self.scene_view.scene.clear()
            # un-register the SceneView from Viewport updates
            if self.viewport_window:
                self.viewport_window.viewport_api.remove_scene_view(self.scene_view)
            # Remove our references to these objects
            self.viewport_window = None
            self.scene_view = None | 1,422 | Python | 39.657142 | 89 | 0.656118
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/object_info_model.py | from pxr import Tf
from pxr import Gf
from pxr import Usd
from pxr import UsdGeom
from pxr import UsdShade
from pxr import UsdLux
from .IESReader import IESLight
import os.path
import numpy as np
from omni.ui import scene as sc
import omni.usd
def _flatten_matrix(matrix: Gf.Matrix4d):
m0, m1, m2, m3 = matrix[0], matrix[1], matrix[2], matrix[3]
return [
m0[0],
m0[1],
m0[2],
m0[3],
m1[0],
m1[1],
m1[2],
m1[3],
m2[0],
m2[1],
m2[2],
m2[3],
m3[0],
m3[1],
m3[2],
m3[3],
]
class ObjInfoModel(sc.AbstractManipulatorModel):
    """
    Manipulator model that tracks the selected IES lights: their world
    transforms and the 3D point clouds of their IES profiles. Values are
    read straight from USD on request rather than cached here.
    """
    class MatrixItem(sc.AbstractManipulatorItem):
        """
        Model item representing a transformation. It holds no data itself
        because the transformation is read directly from USD when requested.
        """
        # Flat row-major 4x4 identity, used when nothing is selected.
        identity = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
        def __init__(self):
            super().__init__()
            self.value = self.identity.copy()
    class PositionItem(sc.AbstractManipulatorItem):
        """
        Model item representing a position. It holds no data itself
        because the position is read directly from USD when requested.
        """
        def __init__(self) -> None:
            super().__init__()
            self.value = [0, 0, 0]
    class PositionList(sc.AbstractManipulatorItem):
        """
        Model item representing a list of positions (one point cloud per
        selected light). Data is read directly from USD when requested.
        """
        def __init__(self) -> None:
            super().__init__()
            self.value = [[0,0,0]]
    def __init__(self) -> None:
        super().__init__()
        # Current selected prim list
        self.prim = []
        # Paths of the selected light prims (parallel to self.prim).
        self.current_path = []
        self.material_name = []
        # Tf.Notice listener for object changes; created lazily on selection.
        self.stage_listener = None
        # Angular resolution (degrees) used when drawing the IES profile.
        self.horizontal_step = 15
        self.vertical_step = 15
        self.IESPoints = [ObjInfoModel.PositionList()]
        self.transformation = [ObjInfoModel.MatrixItem()]
        # Save the UsdContext name (we currently only work with a single Context)
        self.usd_context = self._get_context()
        # Track selection changes
        self.events = self.usd_context.get_stage_event_stream()
        self.stage_event_delegate = self.events.create_subscription_to_pop(
            self.on_stage_event, name="Object Info Selection Update"
        )
    @property
    def _time(self):
        # Always sample USD at the default time code.
        return Usd.TimeCode.Default()
    def _get_context(self) -> Usd.Stage:
        # Get the UsdContext we are attached to
        return omni.usd.get_context()
    # Update when lights are transformed or modified.
    def notice_changed(self, notice: Usd.Notice, stage: Usd.Stage) -> None:
        """Called by Tf.Notice. Used when the current selected object changes in some way."""
        light_path = self.current_path
        if not light_path:
            return
        for p in notice.GetChangedInfoOnlyPaths():
            prim_path = p.GetPrimPath().pathString
            #check if prim_path not in selected list but parent of prim_path is in selected list
            if prim_path not in light_path:
                if (True in (light_path_item.startswith(prim_path) for light_path_item in light_path)):
                    if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):
                        self._item_changed(self.transformation[0])
                continue
            if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):
                self._item_changed(self.transformation[0])
            #if light property changed such as ies file changed, update profile
            self._item_changed(self.transformation[0])
    def _get_transform(self, time: Usd.TimeCode):
        """Returns the world transforms (flat row-major lists) of the selected lights."""
        if not self.prim:
            # Identity when nothing is selected.
            return [ObjInfoModel.MatrixItem.identity.copy()]
        # Compute matrix from world-transform in USD
        # NOTE(review): the prim is wrapped as BasisCurves apparently only to call
        # ComputeLocalToWorldTransform; any Xformable schema would do — confirm.
        world_xform_list = [UsdGeom.BasisCurves(prim).ComputeLocalToWorldTransform(time) for prim in self.prim]
        # Flatten Gf.Matrix4d to list
        return [_flatten_matrix(world_xform) for world_xform in world_xform_list]
    def get_item(self, identifier):
        # Look up a model item by its string identifier.
        if identifier == "IESPoints":
            return self.IESPoints
        if identifier == "transformation":
            return self.transformation
    def get_as_floats(self, item):
        # Resolve an item's current value from USD on demand.
        if item == self.transformation:
            return self._get_transform(self._time)
        if item == self.IESPoints:
            return self.get_points(self._time)
        return []
    # Get IES profile points for each selected light.
    def get_points(self, time: Usd.TimeCode):
        if not self.prim:
            return [[0,0,0]]
        allIESPoint = []
        for prim in self.prim:
            # 'shaping:ies:file' holds an asset path; str() wraps it in '@...@',
            # which is stripped before handing it to the IES parser.
            iesFile = prim.GetAttribute('shaping:ies:file').Get()
            allIESPoint.append(IESLight(str(iesFile).replace('@', '')).points)
        return allIESPoint
    def on_stage_event(self, event):
        """Called by stage_event_stream. We only care about selection changes."""
        if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            self.current_path = []
            self.prim = []
            primList = []
            primPathList = []
            usd_context = self._get_context()
            stage = usd_context.get_stage()
            if not stage:
                return
            prim_paths = usd_context.get_selection().get_selected_prim_paths()
            if not prim_paths:
                # This turns off the manipulator when everything is deselected
                self._item_changed(self.transformation[0])
                return
            # Keep only lights that have an IES file applied; distant lights
            # are excluded.
            lightCount = 0
            for i in prim_paths:
                prim = stage.GetPrimAtPath(i)
                if(UsdLux.Light(prim) and prim.GetAttribute('shaping:ies:file').Get() and not (prim.IsA(UsdLux.DistantLight))):
                    primList.append(prim)
                    primPathList.append(i)
                    lightCount = lightCount +1
            if(lightCount==0):
                # No eligible lights selected: stop listening and hide the overlay.
                if self.stage_listener:
                    self.stage_listener.Revoke()
                    self.stage_listener = None
                self._item_changed(self.transformation[0])
                return
            if not self.stage_listener:
                # This handles camera movement
                self.stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self.notice_changed, stage)
            self.prim = primList
            self.current_path = primPathList
            # Position is changed because new selected object has a different position
            self._item_changed(self.transformation[0])
    def destroy(self):
        # Drop the event stream and unsubscribe from stage events.
        self.events = None
        self.stage_event_delegate.unsubscribe()
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/object_info_manipulator.py | from __future__ import division
from omni.ui import scene as sc
from omni.ui import color as cl
import omni.ui as ui
import numpy as np
class ObjInfoManipulator(sc.Manipulator):
    """Manipulator that draws each selected light's IES profile as a set of
    yellow vertical-slice curves, transformed into the light's world space.
    """
    def on_build(self):
        """Called when the model is changed and rebuilds the whole manipulator"""
        if not self.model:
            return
        # One point cloud per selected light (parallel to the transforms below).
        IESPoints = self.model.get_as_floats(self.model.IESPoints)
        # Number of vertical slices: one per horizontal sample, inclusive of 360.
        numHorizontal = int((360/self.model.horizontal_step)+1)
        primCount = 0
        for transformation in self.model.get_as_floats(self.model.transformation):
            # Parent transform places the curves at the light's world pose.
            self.__root_xf = sc.Transform(transformation)
            with self.__root_xf:
                self._x_xform = sc.Transform()
                with self._x_xform:
                    self._shape_xform = sc.Transform()
                    IESPoint = IESPoints[primCount]
                    numVertical = int(len(IESPoint)/numHorizontal)
                    # Draw one curve per horizontal slice of the point cloud.
                    # NOTE(review): assumes IESPoint is a numpy array (uses .tolist()).
                    for index in range(0,numHorizontal):
                        points = IESPoint[index*numVertical:(index+1)*numVertical]
                        if(len(points)>0):
                            sc.Curve(points.tolist(), thicknesses=[1.0], colors=[cl.yellow],tessellation=9)
            primCount = primCount+1
    def on_model_updated(self, item):
        # Regenerate the manipulator
        self.invalidate()
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/IESReader.py | import numpy as np
import re
import math
#import matplotlib.pyplot as plt
from scipy import interpolate
import os.path
#from mpl_toolkits.mplot3d.axes3d import Axes3D
import omni.ext
import omni.ui as ui
omni.kit.pipapi.install("astropy")
from astropy.coordinates import spherical_to_cartesian
# Angular resolution (degrees) used when resampling IES data onto a regular grid.
DEFAULT_HORIZONTAL_STEP = 15
DEFAULT_VERTICAL_STEP = 15
# Passed as maxLength to IESCoord2XYZ: intensities are rescaled so no drawn
# point lies farther than this from the light's origin.
IES_MaxLength = 80
class IESLight():
    """Parses an IES (LM-63) photometric file into a 3D point cloud.

    After construction, `points` holds one (x, y, z) cartesian sample per
    (horizontal, vertical) angle pair of the resampled profile, scaled so the
    brightest sample sits at most IES_MaxLength from the origin. `width`,
    `length` and `radius` describe the emitter's physical size in meters.
    """
    def __init__(self,iesFile):
        # Default to an empty point cloud so callers reading `self.points`
        # don't crash when the path is missing or invalid.
        self.points = np.zeros((0, 3))
        if iesFile and os.path.exists(iesFile):
            self.file = iesFile
        else:
            return
        self.width = 0
        self.length = 0
        self.radius = 0
        all_values = self.readIESfile(self.file)
        verticalAngles,horizontalAngles,intensities,self.width,self.length,self.radius = self.getIESproperties(all_values)
        horizontalAnglesMirrored, intensityMirrored = self.mirrorAngles(horizontalAngles,intensities)
        # Resample both axes onto regular grids before converting to points.
        horizontalResampled = np.arange(0, 361, DEFAULT_HORIZONTAL_STEP)
        verticalResampled = np.arange(0, verticalAngles[-1]+1, DEFAULT_VERTICAL_STEP)
        resampledIntensity = self.interpolateIESValues(np.array(horizontalAnglesMirrored),np.array(verticalAngles),horizontalResampled,verticalResampled,intensityMirrored)
        self.points = self.IESCoord2XYZ(horizontalResampled,verticalResampled,resampledIntensity,IES_MaxLength)
    def readIESfile(self, fileName):
        """Read an IES file and return every whitespace-separated token after
        the "TILT=NONE" marker as a flat list of strings.
        """
        # ISO-8859-1 reads files with \r\n endings and non-UTF8 vendor
        # metadata without decode errors.
        startReading = 0
        allValues = ""
        with open(fileName, encoding="ISO-8859-1") as f:
            line = f.readline()
            while line:
                # A blank line terminates the data section.
                if not line.strip():
                    break
                # Everything after this marker is the photometric payload.
                if "TILT=NONE" in line.strip():
                    line = f.readline()
                    startReading = 1
                if startReading:
                    allValues = allValues + line
                line = f.readline()
        # One flat list with all numeric tokens (still strings).
        return re.split(r'\s+', allValues.strip())
    def getIESproperties(self, allValues):
        """Extract the measured angles, intensity grid and emitter size.

        Returns (verticalAngles, horizontalAngles, intensities, width, length,
        radius); either radius (disk emitter) or width/length (rectangular
        emitter) is non-zero, expressed in meters.
        """
        FEET2METER = 0.3048
        width = 0
        length = 0
        radius = 0
        # LM-63 header layout: tokens [3]/[4] are the number of vertical and
        # horizontal measurement angles.
        numberVerticalAngle = int(allValues[3])
        numberHorizontalAngle = int(allValues[4])
        # Token [7] < 0 encodes a disk (magnitude = radius); otherwise [7]/[8]
        # are the width/length of a rectangular emitter.
        # Bug fix: tokens are strings, so the previous `allValues[7]*-1` produced
        # '' and `width*FEET2METER` raised TypeError — convert to float first.
        if float(allValues[7]) < 0:
            radius = float(allValues[7]) * -1
        else:
            width = float(allValues[7])
            length = float(allValues[8])
        # Token [6] is the measurement unit: 1 = feet, 2 = meters.
        if float(allValues[6]) == 1:
            radius = radius * FEET2METER
            width = width * FEET2METER
            length = length * FEET2METER
        # The actual measured vertical and horizontal angles.
        verticalAngles = list(map(float, allValues[13:13 + numberVerticalAngle]))
        horizontalAngles = list(map(float, allValues[13 + numberVerticalAngle:13 + numberVerticalAngle + numberHorizontalAngle]))
        # Remaining tokens are candela values, one row per horizontal angle.
        intensities = np.array(allValues[13 + numberVerticalAngle + numberHorizontalAngle:len(allValues)])
        intensities = intensities.reshape(numberHorizontalAngle, numberVerticalAngle).astype(np.float16)
        return verticalAngles, horizontalAngles, intensities, width, length, radius
    def mirrorAngles(self, horizontalAngles, intensities):
        """Expand the measured horizontal data to the full 0-360 degree range.

        IES files exploit symmetry and may store horizontal angles only up to
        0, 90, 180 or 360 degrees; this repeats/mirrors the intensity rows
        accordingly and returns (mirroredAngles, mirroredIntensities).
        """
        # Horizontal angle axis covering the full circle.
        if horizontalAngles[-1] == 0:
            horizontalAnglesMirrored = list(np.arange(0, 361, DEFAULT_HORIZONTAL_STEP))
        else:
            horizontalAnglesMirrored = list(np.arange(0, 361, horizontalAngles[-1] / (len(horizontalAngles) - 1)))
        if horizontalAngles[-1] == 90:
            # Mirror twice: [0..90] -> [0..180] -> [0..360].
            a = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)
            intensityMirrored = np.concatenate((a, np.flip(a, 0)[1:]), axis=0)
        elif horizontalAngles[-1] == 180:
            # Mirror once: [0..180] -> [0..360].
            intensityMirrored = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)
        elif horizontalAngles[-1] == 0:
            # Single measured plane: repeat it for every horizontal step.
            intensityMirrored = np.array(([intensities[0], ] * len(np.arange(0, 361, DEFAULT_HORIZONTAL_STEP))))
        else:
            # Already covers 360 degrees; use as-is.
            intensityMirrored = intensities
        return horizontalAnglesMirrored, intensityMirrored
    def IESCoord2XYZ(self, horizontalAngles, verticalAngles, intensity, maxLength):
        """Convert spherical IES samples to cartesian points, capped at maxLength."""
        maxValue = np.amax(intensity)
        if maxValue > maxLength:
            # Rescale so the farthest point sits exactly at maxLength.
            intensity = intensity * (maxLength / maxValue)
        for index, horizontalAngle in enumerate(horizontalAngles):
            # Omniverse and 3ds Max render the light upside down, so the
            # horizontal rotation direction is negated.
            if index == 0:
                points = np.array(spherical_to_cartesian(intensity[index].tolist(), [math.radians(90 - x) for x in verticalAngles], [math.radians(-1 * horizontalAngle)] * len(verticalAngles))).transpose()
            else:
                newPoints = np.array(spherical_to_cartesian(intensity[index], [math.radians(90 - x) for x in verticalAngles], [math.radians(-1 * horizontalAngle)] * len(verticalAngles))).transpose()
                points = np.concatenate((points, newPoints), axis=0)
        # Flip z for the same upside-down reason.
        points[:, 2] *= -1
        return points
    def interpolateIESValues(self, originalHorizontalAngles, originalVerticalAngles, newHorizontalAngles, newVerticalAngles, intensity):
        """Bilinearly resample the intensity grid onto the new angle axes.

        NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
        SciPy >= 1.14 — migrate to RegularGridInterpolator when updating SciPy.
        """
        fun = interpolate.interp2d(originalVerticalAngles, originalHorizontalAngles, intensity, kind='linear')
        return fun(newVerticalAngles, newHorizontalAngles)
XiaomingY/omni-ies-viewer/exts/IESViewer/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
authors = ["Xiaoming Yang"]
# The title and description fields are primarily for displaying extension info in UI
title = "IES Viewer For Display IES Light Profiles"
description="This extension displays IES profiles for selected light objects."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/XiaomingY/omni-ies-viewer"
# One of categories for UI.
category = "Lighting"
# Keywords for the extension
keywords = ["Lighting", "IES"]
changelog = "docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.ui.scene" = { }
"omni.usd" = { }
"omni.kit.viewport.utility" = { }
# Main python module this extension provides, it will be publicly available as "import AimingTool".
[[python.module]]
name = "IESViewer"
| 993 | TOML | 29.121211 | 99 | 0.732125 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/README.md | # RTX Remix Tools [ekozerski.rtxremixtools]
Focusing on improving RTX Remix modding workflows, this extension is designed to speed up iteration when producing assets and mods by providing useful UI operations inside Omniverse apps like USD Composer/Create or Code.
It provides some options for the "Right click" context menu to setup ideal replacement assets, as well as some converting operations to ensure assets will be compatible with the Remix runtime.
![Alt text](ContextMenu.png)
It is primarily designed to operate on Remix captured scenes, so users can get instant feedback on what their mods are going to look like in the game scenes and iterate faster.
## Available Tools
### Fix Meshes Geometry
<i>(Operation is performed on every mesh of a USD/USDA source file and can\'t be undone)</i>
Interpolation Mode
- RTX Remix runtime only supports meshes with "vertex" interpolation mode, in which "points" "normals" and "uvs" arrays
must have the same length, but DCC tools usually export the mesh using "faceVarying" interpolation mode.
This operation reorganizes the geometry to be compatible with the runtime.
- See: "Interpolation of Geometric Primitive Variables" - https://openusd.org/dev/api/class_usd_geom_primvar.html
- This operation only applies for meshes inside the mods folder, not the captured ones.
UV Maps
- The runtime supports one single UV map per mesh, which should have one of a few known names, so this script finds many variations, picks one and renames to the standard "primvars:st", while also setting the appropriate type as "TextureCoordinate" (TexCoord2fArray / TexCoord2f[]). The other UVmaps are discarded.
Unused Primvars
- displayColor and displayOpacity are now removed from the mesh.
### Setup for Mesh Replacement
Exports the selected mesh in a selected path, already setting up the replacements and references to work in the runtime, so for every change the user only needs to:
- Open the exported mesh in it's DCC of choice, make the changes and export again (with the right settings, triangulating faces, no materials, etc.)
- Back in OV, refresh the reference to see the changes in the captured scene.
- Use the "Fix Meshes Geometry" again to make it Remix-compatible.
- Enjoy.
The original mesh is kept in case the user only wants to add more models. Make sure to delete it if the intention is to completely replace the original mesh.
### Add Model
If the user already has authored USD models, this option allows to select multiple models and add to the mesh_HASH prim.
### Add Material
This option allows to select a material .MDL file (AperturePBR_Opacity.mdl or AperturePBR_Translucent.mdl) to add a material prim to the mesh_HASH prim.
### Original Draw Call Preservation
Allows to set the "custom int preserveOriginalDrawCall" attribute to indicate whether the runtime should be forced to render the original mesh or not. Must be set to 1 when placing custom lights or else the original mesh disappears. PS: Remember to set this to 0 if you want to make a mesh replacement and remove the original mesh.
### Select Source Mesh
A quick way to select the original source mesh_HASH prim in the scene when you have an instance prim selected.
<br>
## Things to Keep in mind
- In a capture scene, any changes made to the "inst_SOMEHASH_x" prims won't show up in the runtime, so every changes must be done in the "mesh_SOMEHASH" they're referencing. Whenever the user clicks a inst_ prim to perform an action like Fixing geometry or Add Model (Ex: Right clicking in the 3D viewport), this tool will try to find the referenced mesh_SOMEHASH and perform the operations in it instead.
- Having that in mind, always keep an eye in the "Layers" tab to check if you have done any changes to the "instances" path. Try to delete those changes as much as possible.
- The only material types that work in the runtime are described in the AperturePBR_Opacity.MDL and AperturePBR_Translucent.MDL, and every mesh must be triangulated. If you want to add a model you got from somewhere else like an asset store, make sure to convert the assets to work in the runtime.
- When placing lights in the scene, it is necessary to set an int "preserveOriginalDrawCall" to "1" in order to keep rendering the original mesh. If another layer is setting this flag somewhere and you want to replace/remove the original mesh in your own layer, you will notice that the original mesh can't be removed without setting this flag back to "0". You can do that on your own layer, set it back to "0", but make sure your layer comes on top of the other one that sets it to true.
| 4,604 | Markdown | 79.789472 | 489 | 0.786273 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/add_model.py | import os
from pathlib import Path
from typing import List
import omni
from omni.client import make_relative_url
from omni.kit.window.file_importer import get_file_importer
from omni.kit.window.file_exporter import get_file_exporter
import omni.usd as usd
from pxr import UsdGeom, Usd, Sdf
from ekozerski.rtxremixtools.utils import find_inst_hash_prim, find_source_mesh_hash_prim
from ekozerski.rtxremixtools.commons import log_info
from ekozerski.rtxremixtools import mesh_utils
class UserCache:
    # Directory of the model the user last imported; reused as the starting
    # location for subsequent "Add Model" file dialogs.
    LAST_OPENED_MODEL = None
def open_export_dialog_for_captured_mesh(prim_path, mesh):
    # NOTE(review): this definition is shadowed by the identically named
    # function defined later in this module, so it is dead code — consider
    # removing it.
    def setup_references_in_stage(current_stage, reference_file_location):
        # Create an Xform under the captured mesh_HASH prim that references the
        # exported file, then reselect it so the user sees the replacement.
        _, mesh_hash, __ = Usd.Prim.GetName(mesh.GetParent()).split('_')
        xform_prim_path = f'/RootNode/meshes/mesh_{mesh_hash}/Xform_{mesh_hash}_0'
        omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=xform_prim_path)
        # Reference must be relative to the layer being edited.
        editing_layer = current_stage.GetEditTarget().GetLayer()
        relative_file_path = make_relative_url(editing_layer.realPath, reference_file_location)
        omni.kit.commands.execute('AddReference',
            stage=current_stage,
            prim_path=Sdf.Path(xform_prim_path),
            reference=Sdf.Reference(relative_file_path)
        )
        selection = omni.usd.get_context().get_selection()
        selection.clear_selected_prim_paths()
        source_layer = mesh.GetPrimStack()[-1].layer
        source_layer.Reload()
        selection.set_selected_prim_paths([xform_prim_path], False)
    def file_export_handler(filename: str, dirname: str, extension: str = "", selections: List[str] = []):
        # Export a stripped copy of the captured mesh (only Remix-relevant
        # attributes), converted to "vertex" interpolation, then wire up the
        # reference in the current stage.
        stage = Usd.Stage.CreateInMemory()
        root_xform = UsdGeom.Xform.Define(stage, '/root').GetPrim()
        stage.SetDefaultPrim(root_xform)
        new_mesh = UsdGeom.Mesh.Define(stage, f'/root/{prim_path.rsplit("/", 1)[-1]}')
        needed_attr_names = ['doubleSided', 'extent', 'faceVertexCounts', 'faceVertexIndices', 'normals', 'points', 'primvars:st']
        [
            new_mesh.GetPrim().CreateAttribute(attr.GetName(), attr.GetTypeName()).Set(attr.Get())
            for attr in mesh.GetAttributes()
            if attr.Get() and attr.GetName() in needed_attr_names
        ]
        mesh_utils.convert_mesh_to_vertex_interpolation_mode(new_mesh)
        # Match the up axis of the stage currently open in the editor.
        ctx = usd.get_context()
        current_stage = ctx.get_stage()
        upAxis = UsdGeom.GetStageUpAxis(current_stage)
        UsdGeom.SetStageUpAxis(stage, upAxis)
        save_location = dirname + filename + extension
        stage.Export(save_location)
        setup_references_in_stage(current_stage, save_location)
        log_info(f"> Exporting {prim_path} in '{save_location}'")
    # Default the dialog into the mod's gameReadyAssets folder when the source
    # layer lives inside an rtx-remix capture tree.
    source_layer = mesh.GetPrimStack()[-1].layer
    rtx_remix_path_parts = source_layer.realPath.split(os.path.join("rtx-remix"), 1)
    if len(rtx_remix_path_parts) > 1:
        rtx_remix_path = os.path.join(rtx_remix_path_parts[0], "rtx-remix", "mods", "gameReadyAssets")
    else:
        rtx_remix_path = source_layer.realPath
    rtx_remix_path = os.path.join(rtx_remix_path, "CustomMesh")
    file_exporter = get_file_exporter()
    file_exporter.show_window(
        title=f'Export "{prim_path}"',
        export_button_label="Save",
        export_handler=file_export_handler,
        filename_url=rtx_remix_path,
    )
def copy_original_mesh(prim_path, mesh, output_path):
    """Export a stripped-down copy of `mesh` to `output_path` as a standalone stage.

    Only the geometry attributes Remix needs are copied; the copy is converted
    to "vertex" interpolation and inherits the current stage's up axis.
    """
    export_stage = Usd.Stage.CreateInMemory()
    root_prim = UsdGeom.Xform.Define(export_stage, '/root').GetPrim()
    export_stage.SetDefaultPrim(root_prim)
    mesh_name = prim_path.rsplit("/", 1)[-1]
    mesh_copy = UsdGeom.Mesh.Define(export_stage, f'/root/{mesh_name}')
    wanted_attrs = {'doubleSided', 'extent', 'faceVertexCounts', 'faceVertexIndices', 'normals', 'points', 'primvars:st'}
    target_prim = mesh_copy.GetPrim()
    for attr in mesh.GetAttributes():
        value = attr.Get()
        if value and attr.GetName() in wanted_attrs:
            target_prim.CreateAttribute(attr.GetName(), attr.GetTypeName()).Set(value)
    # The Remix runtime only accepts "vertex" interpolation.
    mesh_utils.convert_mesh_to_vertex_interpolation_mode(mesh_copy)
    # Match the up axis of the stage currently open in the editor.
    current_stage = usd.get_context().get_stage()
    UsdGeom.SetStageUpAxis(export_stage, UsdGeom.GetStageUpAxis(current_stage))
    export_stage.Export(output_path)
def setup_references_in_stage(mesh, current_stage, reference_file_location):
    # Create an Xform under the source mesh_HASH prim referencing the exported
    # file, then select it so the user immediately sees the replacement.
    inst_hash_prim = find_inst_hash_prim(mesh)
    _, mesh_hash, __ = Usd.Prim.GetName(inst_hash_prim).split('_')
    # Prim name mirrors the exported file's basename (without extension).
    export_prim_name = os.path.basename(reference_file_location).split('.', 1)[0]
    xform_prim_path = f'/RootNode/meshes/mesh_{mesh_hash}/{export_prim_name}'
    omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=xform_prim_path)
    # The reference path must be relative to the layer being edited.
    editing_layer = current_stage.GetEditTarget().GetLayer()
    relative_file_path = make_relative_url(editing_layer.realPath, reference_file_location)
    omni.kit.commands.execute('AddReference',
        stage=current_stage,
        prim_path=Sdf.Path(xform_prim_path),
        reference=Sdf.Reference(relative_file_path)
    )
    # Reload the source layer so the new reference resolves, then reselect.
    source_layer = mesh.GetPrimStack()[-1].layer
    source_layer.Reload()
    selection = omni.usd.get_context().get_selection()
    selection.clear_selected_prim_paths()
    selection.set_selected_prim_paths([xform_prim_path], False)
def open_export_dialog_for_captured_mesh(prim_path, mesh):
    # Show a file-export dialog; on confirm, export a Remix-compatible copy of
    # the captured mesh and reference it back into the current stage.
    def export_mesh(filename: str, dirname: str, extension: str = "", selections: List[str] = []):
        file_location = dirname + filename + extension
        copy_original_mesh(prim_path, mesh, file_location)
        ctx = usd.get_context()
        current_stage = ctx.get_stage()
        setup_references_in_stage(mesh, current_stage, file_location)
    # Default the dialog into the mod's gameReadyAssets folder when the source
    # layer lives inside an rtx-remix capture tree.
    source_layer = mesh.GetPrimStack()[-1].layer
    rtx_remix_path_parts = source_layer.realPath.split(os.path.join("rtx-remix"), 1)
    rtx_remix_path = source_layer.realPath
    if len(rtx_remix_path_parts) > 1:
        rtx_remix_path = os.path.join(rtx_remix_path_parts[0], "rtx-remix", "mods", "gameReadyAssets")
    rtx_remix_path = os.path.join(rtx_remix_path, "CustomMesh")
    file_exporter = get_file_exporter()
    file_exporter.show_window(
        title=f'Export "{prim_path}"',
        export_button_label="Save",
        export_handler=export_mesh,
        filename_url=rtx_remix_path,
    )
def open_import_dialog_for_add_models(prim_path):
    # Show a file-import dialog; each selected USD file is referenced under the
    # source mesh_HASH prim via its own uniquely named Xform child.
    def import_mesh(filename: str, dirname: str, selections: List[str] = []):
        # TODO: Loop through all selections and add them all to the mesh_HASH with their respective xforms correctly named without collisions.
        mesh_path = mesh.GetPath().pathString
        new_selection = list()
        counter = 0
        for reference_file in selections:
            xform_name = Path(reference_file).stem
            new_mesh_path = mesh_path + f'/{xform_name}_{counter}'
            # Bump the suffix until the path is free of collisions.
            while current_stage.GetPrimAtPath(new_mesh_path).IsValid():
                counter += 1
                new_mesh_path = mesh_path + f'/{xform_name}_{counter}'
            omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=new_mesh_path)
            # The reference path must be relative to the layer being edited.
            editing_layer = current_stage.GetEditTarget().GetLayer()
            relative_file_path = make_relative_url(editing_layer.realPath, reference_file)
            omni.kit.commands.execute('AddReference',
                stage=current_stage,
                prim_path=Sdf.Path(new_mesh_path),
                reference=Sdf.Reference(relative_file_path)
            )
            new_selection.append(new_mesh_path)
            # Remember the folder so the next dialog reopens there.
            UserCache.LAST_OPENED_MODEL = os.path.dirname(reference_file)
            counter += 1
        # Reload the source layer so the new references resolve, then select them.
        source_layer = mesh.GetPrimStack()[-1].layer
        source_layer.Reload()
        selection = omni.usd.get_context().get_selection()
        selection.clear_selected_prim_paths()
        selection.set_selected_prim_paths(new_selection, False)
    ctx = usd.get_context()
    current_stage = ctx.get_stage()
    # Resolve the mesh_HASH prim backing the (possibly instance) selection.
    inst_prim = current_stage.GetPrimAtPath(prim_path)
    mesh = find_source_mesh_hash_prim(current_stage, inst_prim)
    source_layer = mesh.GetPrimStack()[-1].layer
    filename_url = UserCache.LAST_OPENED_MODEL if UserCache.LAST_OPENED_MODEL is not None else source_layer.realPath
    file_importer = get_file_importer()
    file_importer.show_window(
        title=f'Import Models',
        import_button_label="Import",
        import_handler=import_mesh,
        filename_url=filename_url,
    )
def open_add_model_dialog():
    """Open an "Add Model" import dialog for every currently selected prim path."""
    selected_paths = usd.get_context().get_selection().get_selected_prim_paths()
    for prim_path in selected_paths:
        open_import_dialog_for_add_models(prim_path)
def open_mesh_replacement_setup_dialog():
    """Open an export dialog for each selected mesh that comes from a Remix capture."""
    for prim_path, mesh in mesh_utils.get_selected_mesh_prims().items():
        if not mesh_utils.is_a_captured_mesh(mesh):
            continue
        open_export_dialog_for_captured_mesh(prim_path, mesh)
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/commons.py | import carb
# Prefix identifying this extension in Kit's log output.
_LOG_TAG = "[RTX Remix Tool]"


def log_info(msg: str):
    """Log an info-level message tagged with the extension's prefix."""
    carb.log_info(f"{_LOG_TAG} {msg}")


def log_warn(msg: str):
    """Log a warning-level message tagged with the extension's prefix."""
    carb.log_warn(f"{_LOG_TAG} {msg}")


def log_error(msg: str):
    """Log an error-level message tagged with the extension's prefix."""
    carb.log_error(f"{_LOG_TAG} {msg}")
| 227 | Python | 15.285713 | 45 | 0.621145 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/extension.py | import omni.ext
import omni.ui as ui
from omni.kit import context_menu
from omni.kit.hotkeys.core import get_hotkey_registry
from omni.kit.actions.core import get_action_registry
from . import commons
from .rtx_context_menu import build_rtx_remix_menu
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class RtxRemixTools(omni.ext.IExt):
    """Extension entry point: registers the RTX Remix context menu, its actions
    and the SHIFT+F "select source mesh" hotkey, and tears them down on disable.
    """
    def on_startup(self, ext_id):
        self.ext_id = ext_id
        commons.log_info(f"Starting Up")
        # Add the "RTX Remix" submenu; empty index/extension-id arguments
        # register it for the generic "MENU" context.
        menu = {"name": "RTX Remix", "populate_fn": build_rtx_remix_menu}
        self._context_menu_subscription = context_menu.add_menu(menu, "MENU", "")
        self.hotkey_registry = get_hotkey_registry()
        register_actions(self.ext_id)
        # SHIFT+F triggers the "select_source_mesh" action registered above.
        self.select_source_mesh_hotkey = self.hotkey_registry.register_hotkey(
            self.ext_id,
            "SHIFT + F",
            self.ext_id,
            "select_source_mesh",
            filter=None,
        )
    def on_shutdown(self):
        commons.log_info(f"Shutting Down")
        # remove event
        self._context_menu_subscription.release()
        self.hotkey_registry.deregister_hotkey(
            self.select_source_mesh_hotkey,
        )
        deregister_actions(self.ext_id)
def register_actions(extension_id):
    """Register this extension's actions with Kit's global action registry.

    Currently registers a single "select_source_mesh" action, triggered by the
    SHIFT+F hotkey registered at startup.
    """
    # NOTE(review): imported inside the function, presumably to defer the
    # module load until registration time — confirm before moving to top level.
    from . import select_source_mesh
    action_registry = get_action_registry()
    actions_tag = "RTX Remix Tools Actions"
    action_registry.register_action(
        extension_id,
        "select_source_mesh",
        select_source_mesh.select_source_meshes,
        display_name="Select Source Mesh",
        description="Selects the corresponding mesh_HASH the prim is related to.",
        tag=actions_tag,
    )
def deregister_actions(extension_id):
    """Remove every action this extension registered with the global registry."""
    get_action_registry().deregister_all_actions_for_extension(extension_id)
| 2,043 | Python | 31.444444 | 118 | 0.664709 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/mesh_utils.py | from collections import OrderedDict
import os
from pxr import UsdGeom, Usd, Sdf
import omni.usd as usd
from ekozerski.rtxremixtools.commons import log_error
def get_selected_mesh_prims():
    """Return {prim_path: prim} for every selected prim that is a UsdGeom.Mesh."""
    ctx = usd.get_context()
    stage = ctx.get_stage()
    meshes = {}
    for path in ctx.get_selection().get_selected_prim_paths():
        prim = stage.GetPrimAtPath(path)
        # UsdGeom.Mesh(prim) is falsy for prims that aren't meshes.
        if UsdGeom.Mesh(prim):
            meshes[path] = prim
    return meshes
def convert_mesh_to_vertex_interpolation_mode(mesh):
    """
    This method attempts to convert Remix meshes' interpolation mode from constant or faceVarying to vertex.
    If there is any faceVarying attribute, it means the data arrays (points, uvs, normals...) will have different
    lengths, so this script will copy data around using the faceVertexIndices array to ensure they all end up with the
    same length.
    """
    # TODO: Study interpolation modes in depth to implement a decent conversion script.
    prim = mesh.GetPrim()
    primvar_api = UsdGeom.PrimvarsAPI(prim)
    primvars = {var for var in primvar_api.GetPrimvars()}
    face_varying_primvars = [v for v in primvars if v.GetInterpolation() == UsdGeom.Tokens.faceVarying]
    if face_varying_primvars or mesh.GetNormalsInterpolation() == UsdGeom.Tokens.faceVarying:
        # Expand every per-vertex array to one element per face-vertex, indexed
        # through faceVertexIndices, so all arrays match the faceVarying length.
        non_face_varying_primvars = list(primvars.difference(face_varying_primvars))
        non_face_varying_primvars = [var for var in non_face_varying_primvars if var.GetInterpolation() != 'uniform']
        indices = prim.GetAttribute("faceVertexIndices")
        # Settings points separately since it doesn't have a "SetInterpolation" like primvars.
        points = prim.GetAttribute("points")
        points_arr = points.Get()
        new_arr = [points_arr[i] for i in indices.Get()]
        points.Set(new_arr)
        for var in non_face_varying_primvars:
            original_arr = var.Get()
            if original_arr:
                new_arr = [original_arr[i] for i in indices.Get()]
                var.Set(new_arr)
        # After expansion the indices become the trivial 0..N-1 sequence.
        indices.Set([i for i in range(len(indices.Get()))])
    # Finally mark everything as "vertex"-interpolated for the Remix runtime.
    [var.SetInterpolation(UsdGeom.Tokens.vertex) for var in primvars]
    mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
def convert_uv_primvars_to_st(mesh):
    """Consolidate every recognized UV primvar on *mesh* into a single 'primvars:st'.

    The RTX Remix runtime only reads one UV set, so all UV-like primvars are removed
    and the preferred one is recreated as a TexCoord2fArray with vertex interpolation.
    """
    # https://github.com/NVIDIAGameWorks/dxvk-remix/blob/ebb0ecfd638d6a32ab5f10708b5b07bc763cf79b/src/dxvk/rtx_render/rtx_mod_usd.cpp#L696
    # https://github.com/Kim2091/RTXRemixTools/blob/8ae25224ef8d1d284f3e208f671b2ce6a35b82af/RemixMeshConvert/For%20USD%20Composer/RemixMeshConvert_OV.py#L4
    known_uv_names = [
        'primvars:st',
        'primvars:uv',
        'primvars:st0',
        'primvars:st1',
        'primvars:st2',
        'primvars:UVMap',
        'primvars:UVChannel_1',
        'primvars:map1',
    ]
    # Preserving the order of found primvars to use the first one, in case a primvars:st
    # can't be found. A plain dict keeps insertion order (Python 3.7+), so the previously
    # used collections.OrderedDict — which this module never imported — isn't needed.
    primvar_api = UsdGeom.PrimvarsAPI(mesh)
    uv_primvars = {
        primvar.GetName(): primvar
        for primvar in primvar_api.GetPrimvars()
        if primvar.GetTypeName().role == 'TextureCoordinate'
        or primvar.GetName() in known_uv_names
    }
    if not uv_primvars:
        return
    # Picking only one UV and blowing up everything else as the runtime only reads the first anyway.
    considered_uv = uv_primvars.get('primvars:st') or next(iter(uv_primvars.values()))
    uv_data = considered_uv.Get()
    # A plain loop (not a throwaway list comprehension) since this is pure side effects.
    for uv_name in uv_primvars:
        primvar_api.RemovePrimvar(uv_name)
    # Recreating the primvar with appropriate name, type and role
    new_uv_primvar = primvar_api.CreatePrimvar('primvars:st', Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.vertex)
    new_uv_primvar.Set(uv_data)
def remove_unused_primvars(mesh):
    """Strip primvars that the Remix runtime doesn't use (display color/opacity)."""
    unused_primvar_names = [
        'primvars:displayColor',
        'primvars:displayOpacity',
    ]
    primvar_api = UsdGeom.PrimvarsAPI(mesh)
    # A plain loop instead of a side-effect list comprehension; also the loop variable
    # was misleadingly named 'uv_name' even though these aren't UV primvars.
    for primvar_name in unused_primvar_names:
        primvar_api.RemovePrimvar(primvar_name)
def fix_meshes_in_file(usd_file_path):
    """Open *usd_file_path*, normalize every triangulated mesh in it, and save the layer.

    Meshes that aren't fully triangulated are skipped with an error message, since the
    conversion routines below assume exactly 3 vertices per face.
    """
    stage = Usd.Stage.Open(usd_file_path)
    mesh_prims = [prim for prim in stage.TraverseAll() if UsdGeom.Mesh(prim)]
    for prim in mesh_prims:
        face_counts = prim.GetAttribute("faceVertexCounts").Get()
        # Generator expression instead of building a throwaway set inside all().
        if not face_counts or any(count != 3 for count in face_counts):
            # Message grammar fixed ("this tools" -> "this tool").
            log_error(
                f"Mesh {prim.GetPath()} in '{usd_file_path}' hasn't been triangulated"
                " and this tool doesn't do that for you yet :("
            )
            continue
        # Build the schema wrapper once instead of three times.
        mesh = UsdGeom.Mesh(prim)
        convert_mesh_to_vertex_interpolation_mode(mesh)
        convert_uv_primvars_to_st(mesh)
        remove_unused_primvars(mesh)
    stage.Save()
def is_a_captured_mesh(mesh):
    """
    Returns True if the Mesh's defining USD file is located in the captures folder.
    """
    # The weakest (last) entry in the prim stack is the layer that originally defined the mesh.
    defining_layer = mesh.GetPrimStack()[-1].layer
    captures_fragment = os.path.normpath("captures/meshes")
    return captures_fragment in os.path.normpath(defining_layer.realPath)
def fix_meshes_geometry():
    """Run the geometry fixes on every selected mesh that lives outside the captures folder."""
    editable_meshes = {
        path: mesh
        for path, mesh in get_selected_mesh_prims().items()
        if not is_a_captured_mesh(mesh)
    }
    for mesh in editable_meshes.values():
        source_layer = mesh.GetPrimStack()[-1].layer
        fix_meshes_in_file(source_layer.realPath)
        source_layer.Reload()
| 5,330 | Python | 39.082706 | 156 | 0.67955 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/add_material.py | import os
from typing import List
from omni import usd, kit
from omni.kit.window.file_importer import get_file_importer
from omni.client import make_relative_url
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage):
    """Show a file-import dialog and create an MDL material prim under *mesh_hash*.

    Args:
        mesh_hash: The mesh_HASH prim that will parent the new material prim.
        ctx: The current omni.usd context (kept for interface compatibility).
        current_stage: The stage in which the material prim is created.
    """
    def create_material_from_mdl_file(filename: str, dirname: str, selections: List[str] = None):
        """Import-handler callback: validate the picked file and create the material prim."""
        # 'selections' defaults to None instead of a shared mutable []; it is unused here.
        if not filename.endswith('mdl'):
            # Interpolate the actual file name (the message used to contain a hard-coded placeholder).
            raise ValueError(f"The selected file '{filename}' doesn't have a mdl extension.")

        mesh_hash_path = mesh_hash.GetPath().pathString
        counter = 0
        material_name = os.path.basename(filename).replace('.mdl', '')
        new_material_path = mesh_hash_path + f'/{material_name}_{counter}'
        # Bump the suffix until the path is free so existing materials aren't overwritten.
        while current_stage.GetPrimAtPath(new_material_path).IsValid():
            counter += 1
            new_material_path = mesh_hash_path + f'/{material_name}_{counter}'

        # TODO: Get material name by inspecting the MDL file rather than guessing from it's name, so users can
        # rename it at will.
        mtl_name = 'AperturePBR_Opacity' if 'Opacity' in filename else 'AperturePBR_Translucent'
        # Reference the MDL relative to the edit-target layer so the mod stays relocatable.
        editing_layer = current_stage.GetEditTarget().GetLayer()
        relative_file_path = make_relative_url(editing_layer.realPath, os.path.join(dirname, filename))
        success, _ = kit.commands.execute('CreateMdlMaterialPrimCommand',
            mtl_url=relative_file_path,
            mtl_name=mtl_name,
            mtl_path=new_material_path,
            select_new_prim=True,
        )

    def filter_handler(filename: str, _, extension_option):
        # Only filter when the '.mdl' option is active; otherwise show everything.
        if extension_option == '.mdl':
            return filename.lower().endswith('.mdl')
        return True

    file_importer = get_file_importer()
    file_importer.show_window(
        title='Select MDL File',
        import_button_label="Select",
        import_handler=create_material_from_mdl_file,
        file_extension_types=[(".mdl", "Opacity or Translucent MDL file")],
        file_filter_handler=filter_handler,
    )
def open_add_material_dialog():
    """Open the add-material dialog once for every distinct source mesh of the current selection."""
    ctx = usd.get_context()
    current_stage = ctx.get_stage()
    selected_paths = ctx.get_selection().get_selected_prim_paths()
    prims = [current_stage.GetPrimAtPath(path) for path in selected_paths]
    source_meshes = {
        mesh
        for mesh in (find_source_mesh_hash_prim(current_stage, prim) for prim in prims)
        if mesh is not None
    }
    for mesh_hash in source_meshes:
        open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage)
| 2,650 | Python | 41.079364 | 112 | 0.659245 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/utils.py | from pxr import Usd
from omni import usd
def find_source_mesh_hash_prim(current_stage, prim):
    """Walk up from *prim* to its ancestor directly under /RootNode/meshes or
    /RootNode/instances, and resolve it to the matching mesh_HASH prim.

    On stages without a /RootNode/meshes hierarchy the prim is returned unchanged.
    """
    if not current_stage.GetPrimAtPath('/RootNode/meshes'):
        return prim

    valid_paths = ['/RootNode/meshes', '/RootNode/instances']
    search_prim = prim
    while search_prim.GetParent().IsValid() and search_prim.GetParent().GetPath().pathString not in valid_paths:
        search_prim = search_prim.GetParent()

    if not search_prim:
        return None

    prim_name = search_prim.GetName()
    if 'mesh_' in prim_name:
        return search_prim

    # Instance prims are named inst_<HASH>_<n>; map them back to mesh_<HASH>.
    _, mesh_hash, __ = prim_name.split('_')
    return current_stage.GetPrimAtPath(f'/RootNode/meshes/mesh_{mesh_hash}')
def find_inst_hash_prim(instance_mesh):
    """Walk up from *instance_mesh* to the inst_HASH prim directly under /RootNode/instances."""
    instances_root = '/RootNode/instances'
    current = instance_mesh
    while current.GetParent().IsValid() and current.GetParent().GetPath().pathString != instances_root:
        current = current.GetParent()
    return current if current else None
| 1,075 | Python | 29.742856 | 112 | 0.665116 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/preserve_draw_calls.py | from omni import usd, kit
from pxr import Sdf
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def set_preserve_original_draw_call(enabled: bool = False):
    """Create/overwrite the int attribute 'preserveOriginalDrawCall' (1 or 0) on every
    source mesh_HASH prim related to the currently selected prims."""
    ctx = usd.get_context()
    stage = ctx.get_stage()
    prims = [stage.GetPrimAtPath(path) for path in ctx.get_selection().get_selected_prim_paths()]
    source_meshes = {
        mesh
        for mesh in (find_source_mesh_hash_prim(stage, prim) for prim in prims)
        if mesh is not None
    }
    for mesh_prim in source_meshes:
        kit.commands.execute(
            'CreateUsdAttributeCommand',
            prim=mesh_prim,
            attr_name='preserveOriginalDrawCall',
            attr_type=Sdf.ValueTypeNames.Int,
            attr_value=1 if enabled else 0
        )
| 880 | Python | 32.884614 | 105 | 0.664773 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/rtx_context_menu.py | from omni.kit.ui import get_custom_glyph_code
from omni import usd
import omni.ui as ui
from . import mesh_utils
from . import add_model
from . import add_material
from . import preserve_draw_calls
from . import select_source_mesh
def _build_fix_mesh_geometry_menu_item():
    """Menu entry that converts selected (non-captured) meshes to vertex interpolation."""
    tooltip = (
        'Interpolation Mode\n'
        'OBS: Operation Can\'t be undone\n'
        ' RTX Remix runtime only supports "vertex" interpolation mode, in which "points", "normals" and "uvs" arrays '
        'must have the same length, but DCC tools usually export the mesh using "faceVarying" interpolation mode.'
        'This operation reorganizes the geometry to be compatible with the runtime. See:\n'
        ' "Interpolation of Geometric Primitive Variables" - https://openusd.org/dev/api/class_usd_geom_primvar.html'
        '\n\nThis operation only applies for meshes inside the mods folder, not the captured ones.'
    )
    ui.MenuItem(
        "Fix Meshes Geometry",
        triggered_fn=mesh_utils.fix_meshes_geometry,
        enabled=any(
            not mesh_utils.is_a_captured_mesh(mesh)
            for mesh in mesh_utils.get_selected_mesh_prims().values()
        ),
        tooltip=tooltip,
    )
def _build_setup_for_mesh_replacements_menu_item():
    """Menu entry that exports a captured mesh and wires up references for replacement work."""
    tooltip = (
        "Export the original mesh to a selected location and setup the references to work within the runtime so you"
        " can focus on remodeling the mesh and export back at the same location."
    )
    ui.MenuItem(
        "Setup for Mesh Replacement",
        triggered_fn=add_model.open_mesh_replacement_setup_dialog,
        enabled=any(
            mesh_utils.is_a_captured_mesh(mesh)
            for mesh in mesh_utils.get_selected_mesh_prims().values()
        ),
        tooltip=tooltip,
    )
def _build_add_model_menu_item():
    """Menu entry that attaches externally authored meshes to the selected prim."""
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Add Model",
        triggered_fn=add_model.open_add_model_dialog,
        tooltip="Add external authored meshes to the prim, setting up properly to work within the runtime.",
        enabled=has_selection,
    )
def _build_add_material_menu_item():
    """Menu entry that attaches an MDL-defined material to the selected prim."""
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Add Material",
        triggered_fn=add_material.open_add_material_dialog,
        tooltip="Add a material defined from an external MDL file to the selected prim.",
        enabled=has_selection,
    )
def _build_preserve_original_draw_call_menu_item():
    """Menu entry that flags the mesh_HASH prim to keep rendering its original draw call."""
    tooltip = (
        "Add a 'custom int preserveOriginalDrawCall' attribute set to '1' to the mesh_HASH prim. Used to indicate to"
        " the runtime whether it should keep rendering the original mesh or not. Should be set when adding custom "
        " lights without removing the original mesh from rendering."
    )
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Preserve",
        triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(True),
        tooltip=tooltip,
        enabled=has_selection,
    )
def _build_dont_preserve_original_draw_call_menu_item():
    """Menu entry that flags the mesh_HASH prim to stop rendering its original draw call."""
    tooltip = (
        "Add a 'custom int preserveOriginalDrawCall' attribute set to '0' to the mesh_HASH prim. Used to indicate to"
        " the runtime whether it should keep rendering the original mesh or not. Should be set when adding custom "
        " lights without removing the original mesh from rendering."
    )
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Don't Preserve",
        triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(False),
        tooltip=tooltip,
        enabled=has_selection,
    )
def _build_select_source_meshes_menu():
    """Menu entry that jumps the selection to the corresponding mesh_HASH prim."""
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Select Source Mesh (Shift + F)",
        triggered_fn=select_source_mesh.select_source_meshes,
        tooltip="Selects the corresponding mesh_HASH the prim is related to.",
        enabled=has_selection,
    )
def build_rtx_remix_menu(event):
    """Assemble the 'RTX Remix' context menu with all tool entries and its submenu."""
    icon = get_custom_glyph_code("${glyphs}/menu_create.svg")
    with ui.Menu(f' {icon} RTX Remix'):
        for build_entry in (
            _build_fix_mesh_geometry_menu_item,
            _build_setup_for_mesh_replacements_menu_item,
            _build_add_model_menu_item,
            _build_add_material_menu_item,
        ):
            build_entry()
        with ui.Menu('Original Draw Call Preservation'):
            _build_preserve_original_draw_call_menu_item()
            _build_dont_preserve_original_draw_call_menu_item()
        _build_select_source_meshes_menu()
| 4,736 | Python | 37.201613 | 122 | 0.652872 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/select_source_mesh.py | from omni import usd
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def select_source_meshes():
    """Replace the current selection with the mesh_HASH prims backing the selected prims."""
    ctx = usd.get_context()
    stage = ctx.get_stage()
    prims = (
        stage.GetPrimAtPath(path)
        for path in ctx.get_selection().get_selected_prim_paths()
    )
    source_meshes = {
        mesh
        for mesh in (find_source_mesh_hash_prim(stage, prim) for prim in prims)
        if mesh is not None
    }
    target_paths = [mesh.GetPath().pathString for mesh in source_meshes]
    selection = ctx.get_selection()
    selection.clear_selected_prim_paths()
    selection.set_selected_prim_paths(target_paths, False)
| 749 | Python | 34.714284 | 105 | 0.70494 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/config/extension.toml | [core]
reloadable = true
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.0.2"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Emanuel Kozerski"]
# The title and description fields are primarily for displaying extension info in UI
title = "RTX Remix Tools"
description="Simple toolkit for creating remixing assets compatible with RTX Remix runtime"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/Ekozmaster/Nvidia-Omniverse-RTX-Remix-Tools"
# One of categories for UI.
category = "Other"
# Keywords for the extension
keywords = ["Tool", "Toolkit", "Tools", "RTX", "Remix"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import ekozerski.rtxremixtools".
[[python.module]]
name = "ekozerski.rtxremixtools"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,709 | TOML | 30.090909 | 118 | 0.747221 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [0.0.3] - 2023-12-22
- "Add Model", "Add Material" and "Fix Mesh Geometry" also works when not in a capture scene now.
- Fixed some errors when using the "Fix Mesh Geometry" option on some meshes.
- Added "Shift + F" hotkey to "Select Source Mesh".
- Fixed error when using "Setup for Mesh Replacement" on captures which nests original game meshes inside a "ref" Xform.
- Added conversion of many "primvar:*" name variations for UV-related primvars to "primvars:st" while discarding extra UV maps.
- Removing unused primvars "displayColor" and "displayOpacity".
- Xforms from added models and materials now are named according to the imported file rather than Xform_HASH_x
## [0.0.2] - 2023-08-28
- Fixing relative paths converted to absolute on the "Fix Meshes Geometry" function.
- Picking best UV map available between all primvars and discarding everything else in the "Fix Meshes Geometry"
- Removing unused primvars when using the "Fix Meshes Geometry".
- Few more bugfixes.
## [0.0.1] - 2023-08-25
- Initial version
- Added "Fix Meshes Geometry" option converting interpolation mode to "vertex".
- Added "Setup for Mesh Replacement" option to export the original mesh for remodeling by external DCC tools.
- Added "Add Model" option to add external authored .USD models to the mesh_HASH prim.
- Added "Add Material" option to add MDL materials to the mesh_HASH prim.
- Added "Original Draw Call Preservation" submenu to set.
- Added "Select Source Mesh" option to quickly select the mesh_HASH prim.
| 1,603 | Markdown | 56.285712 | 127 | 0.758578 |
rcervellione-nv/omni.rhinocompute/CONTRIBUTING.md |
## Contribution Rules
#### Issue Tracking
* All enhancement, bugfix, or change requests must begin with the creation of a [TensorRT Issue Request](https://github.com/nvidia/TensorRT/issues).
* The issue request must be reviewed by TensorRT engineers and approved prior to code review.
#### Coding Guidelines
- All source code contributions must strictly adhere to the [TensorRT Coding Guidelines](CODING-GUIDELINES.md).
- In addition, please follow the existing conventions in the relevant file, submodule, module, and project when you add new code or when you extend/fix existing functionality.
- To maintain consistency in code formatting and style, you should also run `clang-format` on the modified sources with the provided configuration file. This applies TensorRT code formatting rules to:
- class, function/method, and variable/field naming
- comment style
- indentation
- line length
- Format git changes:
```bash
# Commit ID is optional - if unspecified, run format on staged changes.
git-clang-format --style file [commit ID/reference]
```
- Format individual source files:
```bash
# -style=file : Obtain the formatting rules from .clang-format
# -i : In-place modification of the processed file
clang-format -style=file -i -fallback-style=none <file(s) to process>
```
- Format entire codebase (for project maintainers only):
```bash
find samples plugin -iname *.h -o -iname *.c -o -iname *.cpp -o -iname *.hpp \
| xargs clang-format -style=file -i -fallback-style=none
```
- Avoid introducing unnecessary complexity into existing code so that maintainability and readability are preserved.
- Try to keep pull requests (PRs) as concise as possible:
- Avoid committing commented-out code.
- Wherever possible, each PR should address a single concern. If there are several otherwise-unrelated things that should be fixed to reach a desired endpoint, our recommendation is to open several PRs and indicate the dependencies in the description. The more complex the changes are in a single PR, the more time it will take to review those changes.
- Write commit titles using imperative mood and [these rules](https://chris.beams.io/posts/git-commit/), and reference the Issue number corresponding to the PR. Following is the recommended format for commit texts:
```
#<Issue Number> - <Commit Title>
<Commit Body>
```
- Ensure that the build log is clean, meaning no warnings or errors should be present.
- Ensure that all `sample_*` tests pass prior to submitting your code.
- All OSS components must contain accompanying documentation (READMEs) describing the functionality, dependencies, and known issues.
- See `README.md` for existing samples and plugins for reference.
- All OSS components must have an accompanying test.
- If introducing a new component, such as a plugin, provide a test sample to verify the functionality.
- To add or disable functionality:
- Add a CMake option with a default value that matches the existing behavior.
- Where entire files can be included/excluded based on the value of this option, selectively include/exclude the relevant files from compilation by modifying `CMakeLists.txt` rather than using `#if` guards around the entire body of each file.
- Where the functionality involves minor changes to existing files, use `#if` guards.
- Make sure that you can contribute your work to open source (no license and/or patent conflict is introduced by your code). You will need to [`sign`](#signing-your-work) your commit.
- Thanks in advance for your patience as we review your contributions; we do appreciate them!
#### Pull Requests
Developer workflow for code contributions is as follows:
1. Developers must first [fork](https://help.github.com/en/articles/fork-a-repo) the [upstream](https://github.com/nvidia/TensorRT) TensorRT OSS repository.
2. Git clone the forked repository and push changes to the personal fork.
```bash
git clone https://github.com/YOUR_USERNAME/YOUR_FORK.git TensorRT
# Checkout the targeted branch and commit changes
# Push the commits to a branch on the fork (remote).
git push -u origin <local-branch>:<remote-branch>
```
3. Once the code changes are staged on the fork and ready for review, a [Pull Request](https://help.github.com/en/articles/about-pull-requests) (PR) can be [requested](https://help.github.com/en/articles/creating-a-pull-request) to merge the changes from a branch of the fork into a selected branch of upstream.
* Exercise caution when selecting the source and target branches for the PR.
Note that versioned releases of TensorRT OSS are posted to `release/` branches of the upstream repo.
* Creation of a PR creation kicks off the code review process.
  * At least one TensorRT engineer will be assigned for the review.
* While under review, mark your PRs as work-in-progress by prefixing the PR title with [WIP].
4. Since there is no CI/CD process in place yet, the PR will be accepted and the corresponding issue closed only after adequate testing has been completed, manually, by the developer and/or TensorRT engineer reviewing the code.
#### Signing Your Work
* We require that all contributors "sign-off" on their commits. This certifies that the contribution is your original work, or you have rights to submit it under the same license, or a compatible license.
* Any contribution which contains commits that are not Signed-Off will not be accepted.
* To sign off on a commit you simply use the `--signoff` (or `-s`) option when committing your changes:
```bash
$ git commit -s -m "Add cool feature."
```
This will append the following to your commit message:
```
Signed-off-by: Your Name <[email protected]>
```
* Full text of the DCO:
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
```
```
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
(b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
(c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
``` | 7,222 | Markdown | 50.22695 | 387 | 0.7545 |
rcervellione-nv/omni.rhinocompute/README.md | # About
This is an extension designed to run in a Nvidia Omniverse application such as Create or Machinima. The extension creates a link to a Rhino.Compute Server [https://developer.rhino3d.com/guides/compute/] allowing you to run Rhino commands such as quad remesh or Grasshopper files.
This is designed to be a sample to extend. there are examples for using some basic rhino command like volume and quad remesh as well as running a Grasshopper script. Use this as a starting point to integrate your grasshopper scripts and functions directly into Omniverse and create the necessary UI elements.
![Rhino Compute Image 01](exts/cerver.util.rhinocompute/data/CreateAndCompute.png "Rhino Compute and Create")
# Using It
- "app" - It is a folder link to the location of your *Omniverse Kit* based app.
- "exts" - is the folder where you add to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest you install a few extensions that will make python experience better.
Look for "cerver.util.rhinocompute" extension in extension manager inside Omniverse Create and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
The first time you enable it, it will take some time to load. This is because all of the required packages from Rhino and Rhino.Compute will be installed into your Omniverse Python library via an automatic pip install.
# 3rd party Libraries
This project references 3rd party libraries with the following licensing
Rhino.compute
https://github.com/mcneel/compute.rhino3d/blob/master/LICENSE
Rhino3dm
https://github.com/mcneel/rhino3dm/blob/main/LICENSE
Plotly
https://github.com/plotly/plotly.py/blob/master/LICENSE.txt
| 1,794 | Markdown | 53.393938 | 309 | 0.795987 |
rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/extension.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ext
import omni.ui as ui
import omni.usd
from .RhinoComputeFunctions import RhinoFunctions, GrasshopperFunctions
from .RhinoComputUtil import SaveSelectedAs3dm
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    """Extension entry point: builds the Rhino Compute tool window on startup.

    `on_startup(ext_id)` is called when the extension is enabled, `on_shutdown()`
    when it is disabled.
    """

    def __init__(self):
        # Default Rhino.Compute endpoint; editable from the UI string field.
        self.computeUrl = "http://localhost:6500/"
        self.progressbarprog = 0
        self.progbarwindow = None
        self.excludeLastGroupAsLayer = False

    def on_startup(self, ext_id):
        # ext_id is this extension's id; usable with the extension manager for queries.
        def on_server_addr_changed(new_addr):
            self.computeUrl = new_addr

        self._window = ui.Window("Rhino Compute Functions", width=300, height=400)
        with self._window.frame:
            with ui.VStack():
                ui.Label("Compute Server Address")
                addr_field = ui.StringField(height=30)
                addr_field.model.set_value(self.computeUrl)
                addr_field.model.add_value_changed_fn(
                    lambda model: on_server_addr_changed(model.get_value_as_string())
                )
                with ui.CollapsableFrame("Util Functions", height=0):
                    with ui.VStack():
                        ui.Button("save sel as 3dm", clicked_fn=lambda: SaveSelectedAs3dm(self, "S:/test.3dm"), height=40)
                        ui.Button("save all as 3dm", clicked_fn=lambda: RhinoFunctions.SaveAllAs3DM_UI(self), height=40)
                with ui.CollapsableFrame("Mesh Functions", height=0):
                    with ui.VStack():
                        ui.Button("Volume", clicked_fn=lambda: RhinoFunctions.MeshVolume(self), height=40)
                        ui.Button("Mesh Bool Union", clicked_fn=lambda: RhinoFunctions.MeshBoolUnion(self), height=40)
                        ui.Button("Quad Remesh", clicked_fn=lambda: RhinoFunctions.MeshQuadRemesh(self), height=40)
                        ui.Button("Mesh Offset", clicked_fn=lambda: RhinoFunctions.MeshOffset(self), height=40)
                with ui.CollapsableFrame("Grasshopper Functions", height=0):
                    with ui.VStack():
                        ui.Button("Random Diamonds Script", clicked_fn=lambda: GrasshopperFunctions.randomDiamonds_UI(self), height=40)

    def on_shutdown(self):
        print("[omni.RhinoCompute] MyExtension shutdown")
| 3,121 | Python | 51.915253 | 135 | 0.664851 |
rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/RhinoComputeFunctions.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import string
import omni.ext
import omni.ui as ui
from pxr import Usd, UsdGeom
import omni.usd
import carb.events
import omni.kit.app
import os
import json
import time
omni.kit.pipapi.install("rhino3dm")
from rhino3dm import *
omni.kit.pipapi.install("compute-rhino3d")
import compute_rhino3d.Util
import compute_rhino3d.Mesh
import compute_rhino3d.Grasshopper as gh
from .RhinoComputUtil import *
omni.kit.pipapi.install("plotly==5.4.0")
import plotly.graph_objects as go
class RhinoFunctions:
    """Rhino.Compute-backed mesh utilities driven from the extension UI.

    These methods are invoked as plain callbacks with the extension instance
    passed in as *self*, so they only rely on ``self.computeUrl`` (and, for the
    export UI, a couple of UI-state attributes on the extension).
    """

    def ComputeServerUrl(self):
        """Return the configured Rhino.Compute server URL."""
        return self.computeUrl

    def MeshVolume(self):
        """Compute the volume of every selected mesh and plot the values as a pie chart."""
        # Point the compute client at the configured server.
        compute_rhino3d.Util.url = self.computeUrl
        # Convert the selected USD meshes to rhino3dm meshes.
        meshes = convertSelectedUsdMeshToRhino()
        # Compute each volume and collect it together with the prim name.
        # (The old code also accumulated the meshes themselves into an unused list.)
        vols = []
        names = []
        for m in meshes:
            vols.append(compute_rhino3d.Mesh.Volume(m["Mesh"]))
            names.append(m["Name"])
        # Use plotly to plot the volumes as a pie chart.
        fig = go.Figure(
            data=[go.Pie(values=vols, labels=names)],
            layout_title_text="the Volumes"
        )
        fig.show()

    def MeshBoolUnion(self) -> None:
        """Boolean-union all selected meshes and add the result(s) back to the stage."""
        compute_rhino3d.Util.url = self.computeUrl
        meshes = convertSelectedUsdMeshToRhino()
        # Union all selected meshes in a single compute call.
        unioned = compute_rhino3d.Mesh.CreateBooleanUnion([m["Mesh"] for m in meshes])
        # Add to the stage after converting back from rhino to USD mesh.
        # ToDo: add UI to define prim path and names
        # Fix: enumerate the results so each prim gets a unique name; the counter
        # previously stayed at 0, so every result was named "BoolUnion_0".
        for ct, rm in enumerate(unioned):
            RhinoMeshToUsdMesh("/World/rhinoComputed/", f"BoolUnion_{ct}", rm)

    def MeshQuadRemesh(self) -> None:
        """Weld and QuadRemesh every selected mesh, adding the results to the stage."""
        compute_rhino3d.Util.url = self.computeUrl
        meshes = convertSelectedUsdMeshToRhino()
        # QuadRemesh parameters.
        # ToDo: expose these in a UI instead of hard-coding them.
        parameters = {
            'AdaptiveQuadCount': True,
            'AdaptiveSize': 50.0,
            'DetectHardEdges': True,
            'GuideCurveInfluence': 0,
            'PreserveMeshArrayEdgesMode': 0,
            'TargetQuadCount': 2000
        }
        for m in meshes:
            # Weld first so QuadRemesh sees a shared-vertex mesh instead of split faces.
            weldVerts = compute_rhino3d.Mesh.Weld(m["Mesh"], 0.5)
            qrm = compute_rhino3d.Mesh.QuadRemesh(weldVerts, parameters)
            name = m["Name"]
            if qrm is not None:
                RhinoMeshToUsdMesh("/World/rhinoComputed/", name + "_QuadRemesh", qrm)
            else:
                # NOTE(review): 'warning' presumably comes from the RhinoComputUtil
                # star import — confirm it is exported there.
                warning(f"QuadRemesh Failed on {name}")

    def MeshWeld(self, tol) -> None:
        """Weld each selected mesh with tolerance *tol* and add the result to the stage."""
        compute_rhino3d.Util.url = self.computeUrl
        meshes = convertSelectedUsdMeshToRhino()
        for m in meshes:
            weldVerts = compute_rhino3d.Mesh.Weld(m["Mesh"], tol)
            name = m["Name"]
            if weldVerts is not None:
                RhinoMeshToUsdMesh("/World/rhinoComputed/", name + "_Weld", weldVerts)
            else:
                warning(f"Weld Failed on {name}")

    def MeshOffset(self) -> None:
        """Offset each selected mesh by a fixed distance of 1 (solidified) and add the result."""
        compute_rhino3d.Util.url = self.computeUrl
        meshes = convertSelectedUsdMeshToRhino()
        for m in meshes:
            offset_mesh = compute_rhino3d.Mesh.Offset1(m["Mesh"], 1, True)
            RhinoMeshToUsdMesh("/World/rhinoComputed/", m["Name"] + "_offset", offset_mesh)

    def SaveAllAs3DM_UI(self):
        """Open a small window for exporting the whole stage to a Rhino .3dm file."""
        window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR
        self.export3dmwindow = ui.Window("Export Stage As 3DM", width=300, height=130, flags=window_flags)
        with self.export3dmwindow.frame:
            with ui.VStack():
                with ui.HStack():
                    ui.Label("Path", width=50, height=25)
                    path = ui.StringField(height=25, tooltip="Set the location and name of the file i.e c:/temp/myRhinofile.3dm")
                with ui.HStack(height=35):
                    def exLastGrpAsLayCb_changed(self, val):
                        # Remember the checkbox state on the extension instance
                        # (debug print removed).
                        self.excludeLastGroupAsLayer = val

                    exLastGrpAsLayCb = ui.CheckBox(width=30)
                    exLastGrpAsLayCb.model.add_value_changed_fn(lambda cb: exLastGrpAsLayCb_changed(self, cb.get_value_as_bool()))
                    ui.Label("Exlude last group as layer", width=50, height=15)

                def exportbt():
                    SaveAllas3DM(self, path.model.get_value_as_string())

                ui.Line()
                ui.Button("Export", clicked_fn=lambda: exportbt(), height=25)
class GrasshopperFunctions:
def randomDiamonds(self,uCt,vCt,rrA,rrB):
compute_rhino3d.Util.url = self.computeUrl
ghFile = os.path.dirname(os.path.dirname(__file__)) + "/rhinocompute/gh/randomDiamonds.ghx"
selectedMeshes = convertSelectedUsdMeshToRhino()
inputMesh = selectedMeshes[0]["Mesh"]
# create list of input trees
ghMesh = json.dumps(inputMesh.Encode())
mesh_tree = gh.DataTree("baseMesh")
mesh_tree.Append([0], [ghMesh])
srfU_tree = gh.DataTree("srfU")
srfU_tree.Append([0], [uCt])
srfV_tree = gh.DataTree("srfV")
srfV_tree.Append([0], [vCt])
rrA_tree = gh.DataTree("RR_A")
rrA_tree.Append([0], [rrA])
rrB_tree = gh.DataTree("RR_B")
rrB_tree.Append([0], [rrB])
inputs = [mesh_tree, srfU_tree, srfV_tree, rrA_tree, rrB_tree]
results = gh.EvaluateDefinition(ghFile, inputs)
# decode results
data = results['values'][0]['InnerTree']['{0}']
outMeshes = [rhino3dm.CommonObject.Decode(json.loads(item['data'])) for item in data]
ct = 0
for m in outMeshes:
RhinoMeshToUsdMesh("/World",f"/randomDiamonds/randomDiamonds_{ct}",m)
ct+=1
def randomDiamonds_UI(self):
def run(uCt,vCt,rrA,rrB):
GrasshopperFunctions.randomDiamonds(self,uCt, vCt, rrA,rrB)
#window_flags = ui.WINDOW_FLAGS_NO_RESIZE
sliderStyle = {"border_radius":15, "background_color": 0xFFDDDDDD, "secondary_color":0xFFAAAAAA, "color":0xFF111111, "margin":3}
window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR
self.theWindow = ui.Window("Random Diamonds", width=300, height=200, flags=window_flags)
with self.theWindow.frame:
with ui.VStack():
with ui.HStack():
ui.Label("U Ct", width=40)
srfU = ui.IntSlider(height= 20, min=1, max=50, style= sliderStyle )
with ui.HStack():
ui.Label("V Ct", width=40)
srfV = ui.IntSlider(height= 20, min=1, max=50, style= sliderStyle )
with ui.HStack():
ui.Label("min D", width=40)
rrA = ui.FloatSlider(height= 20, min=0.1, max=150, style= sliderStyle )
with ui.HStack():
ui.Label("max D", width=40)
rrB = ui.FloatSlider(height= 20, min=0.1, max=150, style= sliderStyle )
srfU.model.set_value(4)
srfV.model.set_value(4)
rrA.model.set_value(4)
rrB.model.set_value(75)
srfU.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
srfV.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
rrA.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
rrB.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
ui.Line(height=10)
ui.Button("Run >>", clicked_fn=lambda: GrasshopperFunctions.randomDiamonds(self,
srfU.model.get_value_as_int(),
srfV.model.get_value_as_int(),
rrA.model.get_value_as_float(),
rrB.model.get_value_as_float(),
), height=30) | 9,607 | Python | 36.976284 | 184 | 0.584678 |
rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/RhinoComputUtil.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import compute_rhino3d.Util
import compute_rhino3d.Mesh
import compute_rhino3d.Grasshopper as gh
import rhino3dm
import json
import omni.ext
import omni.ui as ui
from pxr import Usd, UsdGeom, Gf
import omni.usd
import asyncio
def convertSelectedUsdMeshToRhino():
    """Convert every USD Mesh prim in the current selection to rhino3dm.

    Returns a list of dicts of the form {"Name": prim name, "Mesh":
    rhino3dm.Mesh}.  Selected prims that are not meshes are ignored.
    """
    context = omni.usd.get_context()
    stage = omni.usd.get_context().get_stage()
    selectedPaths = context.get_selection().get_selected_prim_paths()
    converted = []
    for path in selectedPaths:
        prim = stage.GetPrimAtPath(path)
        # skip anything in the selection that is not a mesh
        if not UsdGeom.Mesh(prim):
            continue
        converted.append({"Name": prim.GetName(), "Mesh": UsdMeshToRhinoMesh(prim)})
    return converted
def UsdMeshToRhinoMesh(usdMesh):
    """Convert a USD Mesh prim into a world-space rhino3dm.Mesh.

    Points are transformed by the prim's local-to-world matrix.  Triangle
    and quad faces are supported; ngons are not handled yet (see TODO).
    Normals are recomputed by rhino3dm rather than copied from USD.

    FIX: face-count tests used identity comparison ("fc is 3"/"fc is 4"),
    which only works because CPython interns small ints and raises a
    SyntaxWarning on modern Python; value comparison (==) is used instead.
    Also removed an unused `vertices` local.
    """
    faces = []
    # the USD points (local space)
    points = UsdGeom.Mesh(usdMesh).GetPointsAttr().Get()
    # items needed to deal with world and local transforms
    xform_cache = UsdGeom.XformCache()
    mtrx_world = xform_cache.GetLocalToWorldTransform(usdMesh)
    # create the rhino mesh
    mesh = rhino3dm.Mesh()
    # convert the USD points to rhino points in world space
    for p in points:
        world_p = mtrx_world.Transform(p)
        mesh.Vertices.Add(world_p[0], world_p[1], world_p[2])
    # face indices can be extended directly because they are plain ints
    faces.extend(UsdGeom.Mesh(usdMesh).GetFaceVertexIndicesAttr().Get())
    faceCount = UsdGeom.Mesh(usdMesh).GetFaceVertexCountsAttr().Get()
    ct = 0
    # USD stores a flat index list; walk it using the per-face vertex counts.
    # ToDo: Deal with ngons
    for fc in faceCount:
        if fc == 3:
            mesh.Faces.AddFace(faces[ct], faces[ct + 1], faces[ct + 2])
        elif fc == 4:
            mesh.Faces.AddFace(faces[ct], faces[ct + 1], faces[ct + 2], faces[ct + 3])
        ct += fc
    # compute normals (the USD normals are intentionally not used here)
    mesh.Normals.ComputeNormals()
    mesh.Compact()
    return mesh
def save_stage():
    """Save the current USD stage's root layer and trigger live-sync processing."""
    stage = omni.usd.get_context().get_stage()
    stage.GetRootLayer().Save()
    # NOTE(review): omni.client is not in this module's visible imports —
    # confirm it is available transitively (via omni.usd), otherwise this
    # line raises AttributeError/NameError at runtime.
    omni.client.usd_live_process()
def RhinoMeshToUsdMesh( rootUrl, meshName, rhinoMesh: rhino3dm.Mesh , primPath=None):
    """Create a UsdGeom.Mesh on the current stage from a rhino3dm mesh.

    Args:
        rootUrl: prim path prefix used when primPath is not supplied.
        meshName: name appended to rootUrl to form the prim path.
        rhinoMesh: source rhino3dm mesh (triangles and quads).
        primPath: optional explicit prim path.  FIX: this parameter was
            previously accepted but ignored; it is now honoured when given
            (backward compatible — behavior with primPath=None is unchanged).
    """
    # get the stage
    stage = omni.usd.get_context().get_stage()
    # Create the geometry inside of "Root"
    meshPrimPath = primPath if primPath else rootUrl + meshName
    mesh = UsdGeom.Mesh.Define(stage, meshPrimPath)
    # Add all of the vertices
    points = []
    for i in range(0, len(rhinoMesh.Vertices)):
        v = rhinoMesh.Vertices[i]
        points.append(Gf.Vec3f(v.X, v.Y, v.Z))
    mesh.CreatePointsAttr(points)
    # Calculate indices for each face; rhino3dm stores triangles as quads
    # with the fourth index repeating the third.
    faceIndices = []
    faceVertexCounts = []
    for i in range(0, rhinoMesh.Faces.Count):
        fcount = 3
        curf = rhinoMesh.Faces[i]
        faceIndices.append(curf[0])
        faceIndices.append(curf[1])
        faceIndices.append(curf[2])
        if curf[2] != curf[3]:  # a true quad, not a degenerate triangle
            faceIndices.append(curf[3])
            fcount = 4
        faceVertexCounts.append(fcount)
    mesh.CreateFaceVertexIndicesAttr(faceIndices)
    mesh.CreateFaceVertexCountsAttr(faceVertexCounts)
    # Add vertex normals
    meshNormals = []
    for n in rhinoMesh.Normals:
        meshNormals.append(Gf.Vec3f(n.X, n.Y, n.Z))
    mesh.CreateNormalsAttr(meshNormals)
def SaveRhinoFile(rhinoMeshes, path):
    """Write the given rhino3dm meshes to a .3dm file at `path`.

    FIX: replaced a list comprehension used purely for its side effects
    with a plain loop (idiomatic; avoids building a throwaway list).
    """
    model = rhino3dm.File3dm()
    for m in rhinoMeshes:
        model.Objects.AddMesh(m)
    model.Write(path)
def SaveSelectedAs3dm(self,path):
    """Export the currently selected USD meshes to a Rhino .3dm file at `path`."""
    meshes = [entry['Mesh'] for entry in convertSelectedUsdMeshToRhino()]
    SaveRhinoFile(meshes, path)
def SaveAllas3DM(self, path):
    """Export every mesh prim on the stage to a Rhino .3dm file at `path`.

    The USD prim hierarchy is mirrored as nested Rhino layers; prim
    properties (Revit "BIM" namespace properties when present, otherwise
    all properties) are copied onto each object as user strings.
    Progress is reported through self.progressbarprog (0..1).
    """
    #get the stage
    stage = omni.usd.get_context().get_stage()
    #get all prims that are meshes
    meshPrims = [stage.GetPrimAtPath(prim.GetPath()) for prim in stage.Traverse() if UsdGeom.Mesh(prim)]
    #make a rhino file
    rhinoFile = rhino3dm.File3dm()
    # maps "a::b::c" nested layer names -> rhino layer index
    uniqLayers = {}
    #figure out how many elements there are (to implement progress bar in future)
    numPrims = len(meshPrims)
    curPrim = 0
    #loop over all the meshes
    for mp in meshPrims:
        #convert from usd mesh to rhino mesh
        rhinoMesh = UsdMeshToRhinoMesh(mp)
        objName = mp.GetName()
        rhinoAttr = rhino3dm.ObjectAttributes()
        dataOnParent = False
        #get the properties on the prim
        bimProps = None
        parentPrim = mp.GetParent()
        #see if this prim has BIM properties (from revit)
        # NOTE(review): if parentPrim is falsy, the next branch dereferences
        # parentPrim anyway — looks like stage roots are assumed to always
        # have a parent; confirm.
        if parentPrim:
            bimProps = mp.GetPropertiesInNamespace("BIM")
            dataOnParent = False
        #see if this prims parent has BIM properties (from revit)
        if not bimProps:
            bimProps = parentPrim.GetPropertiesInNamespace("BIM")
            dataOnParent = True
        #if no bim properties just add regular ones
        if not bimProps :
            bimProps = mp.GetProperties()
            dataOnParent = False
        for p in bimProps:
            try:
                pName = p.GetBaseName()
                var = p.Get()
                rhinoAttr.SetUserString(pName, str(var))
            except Exception :
                # best-effort: properties that cannot be read/stringified are skipped
                pass
        # get the prims path and use that to create nested layers in rhino
        primpath = str(mp.GetPath())
        sepPrimPath = primpath.split('/')
        sepPrimPath.pop(0)
        sepPrimPath.pop()
        # this will adjust the layer structure if the data is from the revit connector
        # or if you just want to prune the last group in the export dialogue
        if dataOnParent or self.excludeLastGroupAsLayer:
            sepPrimPath.pop()
        nestedLayerName = '::'.join(sepPrimPath)
        ct=0
        curLayer = ""
        #loop over all the prim path segments to create the nested layers in rhino
        for pp in sepPrimPath:
            if ct == 0:
                curLayer += pp
            else:
                curLayer += f"::{pp}"
            #check if the layer exists, if not make it
            if not curLayer in uniqLayers :
                layer = rhino3dm.Layer()
                if ct>0:
                    # parent the new layer to the layer one level up
                    prevLayer = curLayer.split('::')
                    prevLayer.pop()
                    prevLayer = '::'.join(prevLayer)
                    layer.ParentLayerId = rhinoFile.Layers.FindIndex(uniqLayers[prevLayer]).Id
                layer.Color = (255,255,255,255)
                layer.Name = pp
                idx = rhinoFile.Layers.Add(layer)
                uniqLayers[curLayer]= int(idx)
            ct+=1
        rhinoAttr.Name = objName
        #print(str(uniqLayers[nestedLayerName]))
        rhinoAttr.LayerIndex = int(str(uniqLayers[nestedLayerName]))
        #add the mesh and its attributes to the rhino file
        rhinoFile.Objects.AddMesh(rhinoMesh, rhinoAttr)
        curPrim += 1
        self.progressbarprog = curPrim/numPrims
    #save it all
    rhinoFile.Write(path)
    print("completed saving")
| 7,771 | Python | 30.983539 | 104 | 0.625402 |
vinjn/llm-metahuman/README.md | # LLM MetaHuman
LLM MetaHuman is an open solution for AI-powered photorealistic digital humans.
## Preparation steps
- Install [Omniverse Launcher](https://www.nvidia.com/en-us/omniverse/download/)
- Inside Omniverse Launcher, Install `Audio2Face`.
- Install [Epic Games Store](https://store.epicgames.com/en-US/)
- Inside Epic Games Store, Install Unreal Engine 5.x.
- Follow [Audio2Face to UE Live Link Plugin](https://docs.omniverse.nvidia.com/audio2face/latest/user-manual/livelink-ue-plugin.html) to connect Audio2Face to Unreal Engine.
## Launch Audio2Face headless
## Launch llm.py
## Launch Unreal Engine Metahuman
| 629 | Markdown | 32.157893 | 172 | 0.772655 |
vinjn/llm-metahuman/audio-client/gen_protoc.py | import os
import subprocess

# Generate Python gRPC bindings for audio2face.proto next to this script.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
proto_src_root = os.path.normpath(os.path.join(ROOT_DIR, "proto/"))
proto_dst_root = os.path.normpath(os.path.join(ROOT_DIR, "."))
proto_fpath = os.path.normpath(os.path.join(ROOT_DIR, "proto", "audio2face.proto"))
# invoke grpc_tools.protoc to emit both *_pb2.py and *_pb2_grpc.py
cmd = [
    "python",
    "-m",
    "grpc_tools.protoc",
    "-I",
    f"{proto_src_root}",
    f"--python_out={proto_dst_root}",
    f"--grpc_python_out={proto_dst_root}",
    f"{proto_fpath}",
]
print(cmd)
subprocess.call(cmd)
| 530 | Python | 22.086956 | 83 | 0.633962 |
vinjn/llm-metahuman/audio-client/llm.py | from openai import OpenAI
from pydub import AudioSegment
import gradio as gr
import requests
import os
from litellm import completion
import time
import threading
import queue
import gradio_client as gc
# XXX: increase requests speed
# https://stackoverflow.com/a/72440253
requests.packages.urllib3.util.connection.HAS_IPV6 = False

# parsed CLI arguments; populated in main()
args = None
# working directory, used as the Audio2Face player root path
CWD = os.getcwd()
print("CWD:", CWD)

# voices supported by the OpenAI TTS endpoint; instance i>0 picks index i mod len
VOICE_ACTORS = ["nova", "alloy", "echo", "fable", "onyx", "shimmer"]
def timing_decorator(func):
    """Decorator that prints how long each call to `func` takes.

    FIX: the wrapper now uses functools.wraps so the decorated function
    keeps its __name__/__doc__ (previously every decorated function
    reported as "wrapper" in tracebacks and introspection).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        elapsed_time = time.time() - start_time
        print(f"{func.__name__} cost: {elapsed_time:.2f} seconds.")
        return result

    return wrapper
class A2fInstance:
    """Thin HTTP client for one headless Audio2Face (A2F) instance.

    Wraps the REST endpoints of A2F (player, exporter/live-link,
    pre/post processing) and tracks whether the service and its
    live-link stream are reachable.  Instance `index` selects the HTTP
    port offset, the live-link ports, and the TTS voice.
    """

    # audio files scheduled for deletion once playback has moved on
    files_to_delete = []
    # all live A2fInstance objects (one per configured instance)
    # NOTE(review): "instaces" is a typo for "instances" but is referenced
    # by name throughout the module, so it is kept.
    instaces = []

    def __init__(self, index) -> None:
        # both flags start False; setup() probes the services
        self.SERVICE_HEALTHY = False
        self.LIVELINK_SERVICE_HEALTHY = False
        self.index = index

    @timing_decorator
    def post(self, end_point, data=None, verbose=True):
        """POST `data` as JSON to `end_point`; return the parsed JSON reply.

        Returns None when the service is marked unhealthy or on transport
        failure (which also flips SERVICE_HEALTHY to False); returns an
        {"Error": ..., "Reason": ...} dict on non-200 responses.
        """
        if not self.SERVICE_HEALTHY:
            return None
        if verbose:
            print(f"++ {end_point}")
        api_url = f"{self.base_url}/{end_point}"
        try:
            response = requests.post(api_url, json=data)
            if response and response.status_code == 200:
                if verbose:
                    print(response.json())
                return response.json()
            else:
                if verbose:
                    print(f"Error: {response.status_code} - {response.text}")
                return {"Error": response.status_code, "Reason": response.text}
        except Exception as e:
            # transport failure: mark the whole service down
            print(e)
            self.SERVICE_HEALTHY = False
            return None

    @timing_decorator
    def get(self, end_point, data=None, verbose=True):
        """GET `end_point` (JSON body `data`); same return contract as post()."""
        if not self.SERVICE_HEALTHY:
            return None
        if verbose:
            print(f"++ {end_point}")
        api_url = f"{self.base_url}/{end_point}"
        try:
            response = requests.get(api_url, json=data)
            if response.status_code == 200:
                if verbose:
                    print(response.json())
                return response.json()
            else:
                if verbose:
                    print(f"Error: {response.status_code} - {response.text}")
                return {"Error": response.status_code, "Reason": response.text}
        except Exception as e:
            print(e)
            self.SERVICE_HEALTHY = False
            return None

    def player_setlooping(self, flag=True):
        # toggle looped playback of the current track
        self.post(
            "A2F/Player/SetLooping",
            {"a2f_player": args.a2f_player_id, "loop_audio": flag},
        )

    def player_play(self):
        # start playback of the currently set track
        self.post("A2F/Player/Play", {"a2f_player": args.a2f_player_id})

    def player_pause(self):
        self.post("A2F/Player/Pause", {"a2f_player": args.a2f_player_id})

    def player_setrootpath(self, dir_path):
        # directory that player_settrack() file names are resolved against
        self.post(
            "A2F/Player/SetRootPath",
            {"a2f_player": args.a2f_player_id, "dir_path": dir_path},
        )

    def player_settrack(self, file_name):
        self.post(
            "A2F/Player/SetTrack",
            {"a2f_player": args.a2f_player_id, "file_name": file_name},
        )

    def player_gettracks(self):
        self.post("A2F/Player/GetTracks", {"a2f_player": args.a2f_player_id})

    def player_gettime(self):
        """Return the current playback position in seconds (0 on failure)."""
        response = self.post(
            "A2F/Player/GetTime", {"a2f_player": args.a2f_player_id}, False
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return 0

    def player_getrange(self):
        """Return the (start, end) work range of the current track; (0, 0) on failure."""
        response = self.post(
            "A2F/Player/GetRange", {"a2f_player": args.a2f_player_id}, False
        )
        if response and response["status"] == "OK":
            return response["result"]["work"]
        else:
            return (0, 0)

    def generatekeys(self):
        # ask A2F to generate emotion keys for the current track
        self.post("A2F/A2E/GenerateKeys", {"a2f_instance": args.a2f_instance_id})

    def ActivateStreamLivelink(self, flag):
        self.post(
            "A2F/Exporter/ActivateStreamLivelink",
            {"node_path": args.a2f_livelink_id, "value": flag},
        )

    def IsStreamLivelinkConnected(self):
        """Return True when the live-link stream node reports connected."""
        response = self.post(
            "A2F/Exporter/IsStreamLivelinkConnected",
            {"node_path": args.a2f_livelink_id},
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return False

    def enable_audio_stream(self, flag):
        self.post(
            "A2F/Exporter/SetStreamLivelinkSettings",
            {
                "node_path": args.a2f_livelink_id,
                "values": {"enable_audio_stream": flag},
            },
        )

    def set_livelink_ports(
        self,
        livelink_host,
        livelink_subject,
        livelink_port,
        livelink_audio_port,
    ):
        # configure where this instance streams its live-link + audio data
        self.post(
            "A2F/Exporter/SetStreamLivelinkSettings",
            {
                "node_path": args.a2f_livelink_id,
                "values": {
                    "livelink_host": livelink_host,
                    "livelink_subject": livelink_subject,
                    "livelink_port": livelink_port,
                    "audio_port": livelink_audio_port,
                },
            },
        )

    def get_preprocessing(self):
        """Return the current pre-processing settings dict ({} on failure)."""
        response = self.post(
            "A2F/PRE/GetSettings",
            {"a2f_instance": args.a2f_instance_id},
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return {}

    def set_preprocessing(self, settings):
        # NOTE: mutates the caller's dict by injecting the instance id
        settings["a2f_instance"] = args.a2f_instance_id
        self.post("A2F/PRE/SetSettings", settings)

    def get_postprocessing(self):
        """Return the current post-processing settings dict ({} on failure)."""
        response = self.post(
            "A2F/POST/GetSettings",
            {"a2f_instance": args.a2f_instance_id},
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return {}

    def set_postprocessing(self, settings):
        self.post(
            "A2F/POST/SetSettings",
            {"a2f_instance": args.a2f_instance_id, "settings": settings},
        )

    def setup(self):
        """Probe the A2F service and configure player, live-link, and processing.

        Sets SERVICE_HEALTHY / LIVELINK_SERVICE_HEALTHY as a side effect;
        bails out early as soon as a layer is found unreachable.
        """
        self.base_url = f"http://{args.a2f_host}:{args.a2f_port+self.index}"
        self.tts_voice = args.tts_voice
        if self.index > 0:
            # TODO: make it elegant
            self.tts_voice = VOICE_ACTORS[self.index % len(VOICE_ACTORS)]
        # always ping SERVICE_HEALTHY again in setup()
        self.SERVICE_HEALTHY = True
        self.ActivateStreamLivelink(True)
        if not self.SERVICE_HEALTHY:
            return
        self.player_setrootpath(CWD)
        self.player_setlooping(False)
        self.LIVELINK_SERVICE_HEALTHY = self.IsStreamLivelinkConnected()
        if not self.LIVELINK_SERVICE_HEALTHY:
            return
        self.enable_audio_stream(True)
        # each instance gets its own subject and a 10-port spacing
        self.set_livelink_ports(
            args.livelink_host,
            f"{args.livelink_subject}-{self.index}",
            args.livelink_port + 10 * self.index,
            args.livelink_audio_port + 10 * self.index,
        )
        pre_settings = self.get_preprocessing()
        pre_settings["prediction_delay"] = 0
        pre_settings["blink_interval"] = 1.5
        self.set_preprocessing(pre_settings)
        post_settings = self.get_postprocessing()
        post_settings["skin_strength"] = 1.3
        self.set_postprocessing(post_settings)
# live A2F client objects; (re)filled in main()
A2fInstance.instaces = []
# OpenAI client used for both chat completion fallback and TTS
openai_client = OpenAI()
# gradio client pointing at a peer llm-metahuman instance (set via "peer" command)
# NOTE(review): annotated gc.Client but initialised to None — effectively Optional
gc_client: gc.Client = None
# the ChatInterface; assigned in main()
chat_ui: gr.ChatInterface = None
def run_single_pipeline(a2f, answer, a2f_peer=None):
    """Synthesize `answer` to speech and play it on A2F instance `a2f`.

    Waits for `a2f_peer` (defaults to `a2f`) to finish its current track
    first, honouring the global stop_current_a2f_play flag.  Old temp
    audio files are deleted once playback of the new track starts.
    """
    global stop_current_a2f_play
    if not a2f_peer:
        a2f_peer = a2f
    # print(answer)
    mp3_file = text_to_mp3(answer, a2f.tts_voice)
    wav_file = mp3_to_wav(mp3_file)
    duration = a2f_peer.player_getrange()[1]
    position = a2f_peer.player_gettime()
    # busy-wait (1s steps) until the peer's current track finishes
    while position > 0 and position < duration:
        print(position, duration)
        if stop_current_a2f_play:
            print("stop_current_a2f_play")
            stop_current_a2f_play = False
            return
        time.sleep(1)
        position = a2f_peer.player_gettime()
    print("z")
    time.sleep(1)
    a2f.player_setrootpath(CWD)
    a2f.player_settrack(wav_file)
    # a2f_generatekeys()
    a2f.player_play()
    # best-effort cleanup of audio from the previous utterance
    for file in A2fInstance.files_to_delete:
        try:
            os.remove(file)
        except Exception:
            pass
    A2fInstance.files_to_delete.clear()
    A2fInstance.files_to_delete.append(mp3_file)
    A2fInstance.files_to_delete.append(wav_file)
# index of the A2F instance currently speaking (-1 = none); used in multi-
# instance mode where sentences are prefixed "A:"/"B:"
current_speaker = -1


@timing_decorator
def run_pipeline(answer):
    """Route one sentence to the right A2F instance and speak it.

    Single-instance mode sends everything to instance 0.  In multi-
    instance mode a leading "A:"/"B:" selects the speaker, "(" silences
    (stage directions), and the prefix is stripped before synthesis.
    """
    if args.a2f_instance_count == 1:
        run_single_pipeline(A2fInstance.instaces[0], answer)
        return
    global current_speaker
    if answer.startswith("("):
        current_speaker = -1
    elif answer.startswith("A:"):
        current_speaker = 0
        answer = answer[2:]
    elif answer.startswith("B:"):
        current_speaker = 1
        answer = answer[2:]
    if current_speaker < 0 or current_speaker >= args.a2f_instance_count:
        return
    a2f = A2fInstance.instaces[current_speaker]
    if not a2f.SERVICE_HEALTHY:
        return
    run_single_pipeline(a2f, answer)
@timing_decorator
def text_to_mp3(text, voice):
    """Synthesize `text` with the OpenAI TTS API; return the mp3 file name.

    The file is written to the working directory, named by the current
    timestamp.
    """
    response = openai_client.audio.speech.create(
        model=args.tts_model,
        voice=voice,
        speed=args.tts_speed,
        input=text,
    )
    timestamp = time.time()
    mp3_filename = f"{timestamp}.mp3"
    response.stream_to_file(mp3_filename)
    return mp3_filename
@timing_decorator
def mp3_to_wav(mp3_filename):
    """Convert an mp3 file to a 22050 Hz wav next to it; return the wav path."""
    wav_filename = f"{mp3_filename}.wav"
    audio = AudioSegment.from_mp3(mp3_filename).set_frame_rate(22050)
    audio.export(wav_filename, format="wav")
    return wav_filename
@timing_decorator
def get_completion(chat_history):
    """Send `chat_history` (OpenAI message format) to the LLM via litellm.

    Returns either a streaming iterator or a full response object,
    depending on args.llm_streaming.
    """
    response = completion(
        model=args.llm_model,
        messages=chat_history,
        api_base=args.llm_url,
        stream=args.llm_streaming,
    )
    print(response)
    return response
# sentence queue feeding pipeline_worker; "cleanup_queue_token" is a sentinel
q = queue.Queue()
# when True, pipeline_worker drains pending sentences up to the sentinel
cleanup_queue = False
# when True, run_single_pipeline aborts its wait for the current track
stop_current_a2f_play = False
def pipeline_worker():
    """Daemon loop: pull sentences off `q` and speak them via run_pipeline.

    When cleanup_queue is set (the "stop" command), drains queued
    sentences up to the "cleanup_queue_token" sentinel and also requests
    that any in-progress playback wait be aborted.
    """
    while True:
        print("--------------------------")
        global cleanup_queue
        global stop_current_a2f_play
        if cleanup_queue:
            # discard everything queued before the sentinel
            while not q.empty():
                item = q.get()
                q.task_done()
                if item == "cleanup_queue_token":
                    break
            cleanup_queue = False
            stop_current_a2f_play = True
        item = q.get()
        if item == "cleanup_queue_token":
            # sentinel arrived after the drain finished; ignore it
            continue
        print(f"Begin: {item}")
        run_pipeline(item)
        print(f"End: {item}")
        q.task_done()
def talk_to_peer(message):
    """Forward one sentence to the peer llm-metahuman instance, if configured."""
    if not gc_client:
        return
    result = gc_client.predict(
        message, api_name="/chat"  # str in 'Message' Textbox component
    )
    print(f"from peer: {result}")
    # chat_ui.textbox.submit(None, [result, result])
    # chat_ui.textbox.submit()
def predict(message, history):
    """Gradio chat handler: yields incremental assistant text for `message`.

    Special commands (exact/prefix match): "setup" re-probes the A2F
    instances, "ping" pokes their endpoints, "redo" replays the last
    track, "stop" drains the sentence queue, "peer <port>" connects to
    another llm-metahuman instance.  Everything else is sent to the LLM;
    in streaming mode complete sentences are queued for TTS/A2F playback
    as they arrive.
    """
    print("==========================")
    if message == "setup":
        # NOTE(review): shadows the builtin `str` inside this branch
        str = ""
        for a2f in A2fInstance.instaces:
            a2f.setup()
            str += f"A2F running: {a2f.SERVICE_HEALTHY}\n"
            str += f"Live Link running: {a2f.LIVELINK_SERVICE_HEALTHY}\n"
        yield str
        return
    if message == "ping":
        for a2f in A2fInstance.instaces:
            a2f.post("")
            a2f.get("")
        yield "A2F ping"
        return
    if message == "redo":
        for a2f in A2fInstance.instaces:
            a2f.player_play()
        yield "A2F redo"
        return
    if message == "stop":
        global cleanup_queue
        cleanup_queue = True
        q.put("cleanup_queue_token")
        yield "stopped"
        return
    if message.startswith("peer"):
        items = message.split()
        if len(items) >= 2:
            gradio_port = int(items[1])
            # TODO: support non localhost
            args.gradio_peer_url = f"http://{args.gradio_host}:{gradio_port}/"
            global gc_client
            gc_client = gc.Client(args.gradio_peer_url)
            yield f"I will chat with another llm-metahuman: {args.gradio_peer_url}"
            return
    # convert gradio's (user, assistant) pair history to OpenAI message format
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})
    # start_time = time.time()
    response = get_completion(history_openai_format)
    yield ".."
    # global cleanup_queue
    # cleanup_queue = True
    # q.put("cleanup_queue_token")
    if args.llm_streaming:
        # create variables to collect the stream of chunks
        UNUSED_collected_chunks = []
        collected_messages = []
        complete_sentences = ""
        # iterate through the stream of events
        for chunk in response:
            # chunk_time = (
            #     time.time() - start_time
            # )  # calculate the time delay of the chunk
            UNUSED_collected_chunks.append(chunk)  # save the event response
            chunk_message = chunk.choices[0].delta.content  # extract the message
            if not chunk_message:
                continue
            collected_messages.append(chunk_message)  # save the message
            # print(
            #     f"Message {chunk_time:.2f} s after request: {chunk_message}"
            # )  # print the delay and text
            print(chunk_message)
            # flush a sentence on terminal punctuation (ASCII or CJK) or newline
            if chunk_message in [
                ".",
                "!",
                "?",
                "。",
                "!",
                "?",
            ] or chunk_message.endswith("\n"):
                # if not chunk_message or "\n" in chunk_message:
                one_sentence = "".join([m for m in collected_messages if m is not None])
                if len(one_sentence) < 10:
                    # ignore short sentences
                    continue
                collected_messages = []
                complete_sentences += one_sentence
                # hand the sentence to the TTS/A2F worker thread
                q.put(one_sentence)
                # run_pipeline(one_sentence)
                yield complete_sentences
                talk_to_peer(one_sentence)
        # print the time delay and text received
        # print(f"Full response received {chunk_time:.2f} seconds after request")
        # # clean None in collected_messages
        # collected_messages = [m for m in collected_messages if m is not None]
        # full_reply_content = "".join([m for m in collected_messages])
        # print(f"Full conversation received: {full_reply_content}")
        # yield full_reply_content
    else:
        # non-streaming: speak the whole answer at once
        if len(response.choices[0].message.content) == 0:
            return
        answer = response.choices[0].message.content
        yield answer
        run_pipeline(answer)
def main():
    """Parse CLI options, start the pipeline worker and A2F clients, launch the UI."""
    import argparse

    parser = argparse.ArgumentParser(description="llm.py arguments")

    # gradio settings
    parser.add_argument("--a2f_instance_count", type=int, default=1)
    parser.add_argument("--gradio_host", default="localhost")
    parser.add_argument("--gradio_port", type=int, default=7860)
    parser.add_argument(
        "--gradio_peer_url",
        default=None,
        help="the gradio peer that this gradio instance will chat with. Default value is None, which means chat with a human.",
    )

    # llm / litellm settings
    parser.add_argument("--llm_engine", default="gpt", choices=["gpt", "llama2"])
    parser.add_argument(
        "--llm_model", default=None, help="https://docs.litellm.ai/docs/providers"
    )
    parser.add_argument("--llm_url", default=None)
    parser.add_argument(
        "--llm_streaming", default=True, action=argparse.BooleanOptionalAction
    )

    # audio2face settings
    parser.add_argument("--a2f_host", default="localhost")
    parser.add_argument("--a2f_port", default=8011, type=int)
    parser.add_argument("--a2f_instance_id", default="/World/audio2face/CoreFullface")
    parser.add_argument("--a2f_player_id", default="/World/audio2face/Player")
    parser.add_argument("--a2f_livelink_id", default="/World/audio2face/StreamLivelink")

    # tts settings
    parser.add_argument("--tts_model", default="tts-1", choices=["tts-1", "tts-1-hd"])
    parser.add_argument("--tts_speed", default=1.1, type=float)

    # livelink settings
    parser.add_argument("--livelink_host", default="localhost")
    parser.add_argument("--livelink_port", default=12030, type=int)
    parser.add_argument("--livelink_subject", default="Audio2Face")
    parser.add_argument("--livelink_audio_port", default=12031, type=int)
    parser.add_argument(
        "--tts_voice",
        default="nova",
        choices=VOICE_ACTORS,
        help="https://platform.openai.com/docs/guides/text-to-speech",
    )
    global args
    args = parser.parse_args()

    # pick engine-specific model/url defaults when none were given
    if not args.llm_model:
        if args.llm_engine == "gpt":
            args.llm_model = args.llm_model or "gpt-3.5-turbo"
        elif args.llm_engine == "llama2":
            args.llm_model = args.llm_model or "ollama/llama2"
            args.llm_url = args.llm_url or "http://localhost:11434"

    # TTS/A2F playback runs on a background daemon thread fed by queue `q`
    threading.Thread(target=pipeline_worker, daemon=True).start()

    for i in range(args.a2f_instance_count):
        a2f = A2fInstance(i)
        a2f.setup()
        A2fInstance.instaces.append(a2f)

    global chat_ui
    chat_ui = gr.ChatInterface(
        predict,
        title=f"llm-metahuman @{args.gradio_port}",
        examples=["hello", "tell me 3 jokes", "what's the meaning of life?"],
    )
    chat_ui.queue().launch(server_name=args.gradio_host, server_port=args.gradio_port)
    q.join()


if __name__ == "__main__":
    main()
| 18,138 | Python | 28.736066 | 127 | 0.573327 |
vinjn/llm-metahuman/audio-client/ref/pytts-demo.py | import pyttsx3
# Demo of the pyttsx3 offline TTS engine: query/set rate, volume and voice,
# speak a couple of phrases, then save one to an mp3 file.
engine = pyttsx3.init()  # object creation
""" RATE"""
rate = engine.getProperty("rate")  # getting details of current speaking rate
print(rate)  # printing current voice rate
engine.setProperty("rate", 125)  # setting up new voice rate
"""VOLUME"""
volume = engine.getProperty(
    "volume"
)  # getting to know current volume level (min=0 and max=1)
print(volume)  # printing current volume level
engine.setProperty("volume", 1.0)  # setting up volume level between 0 and 1
"""VOICE"""
voices = engine.getProperty("voices")  # getting details of current voice
print(voices)
engine.setProperty("voice", voices[0].id)  # changing index changes voices; 0 for male
# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female
engine.say("Hello World!")
engine.say("说什么 current speaking rate is " + str(rate))
engine.runAndWait()
engine.stop()
"""Saving Voice to a file"""
# On linux make sure that 'espeak' and 'ffmpeg' are installed
engine.save_to_file("Hello World", "test.mp3")
engine.runAndWait()
| 1,054 | Python | 30.969696 | 91 | 0.721063 |
vinjn/llm-metahuman/audio-client/ref/minimal-chatbot.py | import random
import gradio as gr
def alternatingly_agree(message, history):
    """Toy chat handler: agree when the history length is even, disagree otherwise."""
    agrees = len(history) % 2 == 0
    return f"Yes, I do think that '{message}'" if agrees else "I don't think so"
# number of times textbox_update has fired; every 10th call injects "z"
count = 0


def textbox_update(chatui_textbox):
    """Pass the textbox content through unchanged, except every 10th call returns "z"."""
    global count
    count += 1
    return "z" if count % 10 == 0 else chatui_textbox
if __name__ == "__main__":
    # wire textbox_update to poll the textbox once per second, then launch
    with gr.ChatInterface(alternatingly_agree) as chat_ui:
        chat_ui.textbox.change(
            textbox_update,
            chat_ui.textbox,
            chat_ui.textbox,
            every=1,
            trigger_mode="once",
        )
    chat_ui.launch()
| 660 | Python | 18.441176 | 58 | 0.554545 |
vinjn/llm-metahuman/audio-client/ref/portal.py | import gradio as gr
def task1(input_text):
    """Demo task for the text tab: echo the input behind a result prefix."""
    prefix = "Task 1 Result: "
    return prefix + input_text
def task2(input_image):
    """Demo task for the image tab: the image is ignored, a fixed label is returned."""
    result_label = "Task 2 Result"
    return result_label
def task3(input_image):
    """Demo task 3: the image is ignored, a fixed label is returned.

    FIX: previously returned "Task 2 Result" (copy-paste from task2).
    task3 is not wired into any interface in this file, so no caller
    depended on the old string.
    """
    return "Task 3 Result"
# interface one: plain text-to-text demo backed by task1
iface1 = gr.Interface(
    fn=task1, inputs="text", outputs="text", title="Multi-Page Interface"
)
# interface two: image-to-text demo backed by task2
iface2 = gr.Interface(
    fn=task2, inputs="image", outputs="text", title="Multi-Page Interface"
)
tts_examples = [
    "I love learning machine learning",
    "How do you do?",
]
# text-to-speech tab loaded from a Hugging Face hosted model
tts_demo = gr.load(
    "huggingface/facebook/fastspeech2-en-ljspeech",
    title=None,
    examples=tts_examples,
    description="Give me something to say!",
    cache_examples=False,
)
# speech-to-text tab loaded from a Hugging Face hosted model (mic input)
stt_demo = gr.load(
    "huggingface/facebook/wav2vec2-base-960h",
    title=None,
    inputs="mic",
    description="Let me try to guess what you're saying!",
)
# combine the four demos into one tabbed app
demo = gr.TabbedInterface(
    [iface1, iface2, tts_demo, stt_demo],
    ["Text-to-text", "image-to-text", "Text-to-speech", "Speech-to-text"],
)
# Run the interface
demo.launch(share=True)
| 1,054 | Python | 18.537037 | 74 | 0.666034 |
vinjn/llm-metahuman/audio-client/ref/sine-curve.py | import math
import gradio as gr
import plotly.express as px
import numpy as np
plot_end = 2 * math.pi
def get_plot(period=1):
    """Return a plotly line figure of sin(2*pi*period*x) over the current window.

    Each call slides the module-level window end (plot_end) forward by
    2*pi, wrapping back to 2*pi once it exceeds 1000.
    """
    global plot_end
    two_pi = 2 * math.pi
    xs = np.arange(plot_end - two_pi, plot_end, 0.02)
    ys = np.sin(two_pi * period * xs)
    figure = px.line(x=xs, y=ys)
    # advance the window, wrapping to keep values bounded
    plot_end += two_pi
    if plot_end > 1000:
        plot_end = two_pi
    return figure
# UI: a period slider driving a live-updating sine plot
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("Change the value of the slider to automatically update the plot")
            period = gr.Slider(label="Period of plot", value=1, minimum=0, maximum=10, step=1)
            plot = gr.Plot(label="Plot (updates every half second)")
    # refresh the plot every second; slider changes cancel and restart the loop
    dep = demo.load(get_plot, None, plot, every=1)
    period.change(get_plot, period, plot, every=1, cancels=[dep])
if __name__ == "__main__":
demo.queue().launch() | 871 | Python | 25.424242 | 94 | 0.6062 |
mnaskret/omni-tetGen/README.md | # omni-tetGen
An omniverse extension to generate soft body meshes
![extTestBunny](https://user-images.githubusercontent.com/4333336/185104847-a556bf22-2323-4d70-8bb8-b8a57e1ec67d.gif)
## Description:
omni-tetGen uses the famous tetgen mesh generator developed by Hang Si to create tetrahedral and edge meshes for soft body simulation. The extension allows for a user-friendly drag-and-drop mechanism for input mesh data in standard .obj format. Then, it runs the python tetgen wrapper to create meshes which are converted to numpy arrays and described with additional information like edge rest lengths or tetrahedra volumes. The generated mesh is added to the stage with additional attributes:
- edge
- edgesRestLengths
- elem
- tetrahedronsRestVolumes
- inverseMasses
![Screenshot from 2022-08-17 13-22-38](https://user-images.githubusercontent.com/4333336/185106588-6f87d9be-c9f1-4ee4-add1-e3bff3a1538d.png)
## PBD .ogn node
Additionally, an omniverse node with a simple Position Based Dynamics algorithm implementation with CUDA kernels is attached in order to test generated meshes.
![Screenshot from 2022-08-17 13-25-31](https://user-images.githubusercontent.com/4333336/185107000-5837f3be-8540-4c5c-884f-1eb7c01b42b8.png)
## Usage
- [Install Omniverse](https://www.nvidia.com/en-us/omniverse/) together with an app such as the Create environment
- Go to: Window -> Extensions -> Gear icon -> Add extension search path: `git://github.com/mnaskret/omni-tetGen.git?branch=main`
- Find Tetrahedralizer in the list of extensions and turn it on (preferably with autoload)
- In the Tetrahedralizer window you can drop any .obj file from Omniverse Content browser, choose preferred options and generate a cool mesh
- Add a graph with PBDBasicGravity node or create your own node that utilizes mesh extra attributes to have fun with your mesh
| 1,824 | Markdown | 59.833331 | 491 | 0.800987 |
mnaskret/omni-tetGen/mnresearch/tetgen/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands as commands
import pxr
from pxr import Sdf
import numpy as np
import tetgenExt
import os
import math
import warp as wp
class MyExtension(omni.ext.IExt):
fileUrl = ''
def drop_accept(url, ext):
# Accept drops of specific extension only
print("File dropped")
return url.endswith(ext)
    def drop(widget, event):
        # Drop callback (no self; called via the class): show the dropped
        # path in the label and remember it for later mesh generation.
        widget.text = event.mime_data
        MyExtension.fileUrl = event.mime_data
    def drop_area(self, ext):
        """Build a drop target that accepts files with extension `ext`.

        A ZStack with a rectangle and a label; the rectangle restyles while
        a compatible file hovers over it.
        """
        # If drop is acceptable, the rectangle is highlighted via the :drop style
        style = {}
        style["Rectangle"] = {"background_color": 0xFF999999}
        style["Rectangle:drop"] = {"background_color": 0xFF994400}
        stack = ui.ZStack()
        with stack:
            ui.Rectangle(style=style)
            text = ui.Label(f"Accepts {ext}", alignment=ui.Alignment.CENTER, word_wrap=True)
        # NOTE(review): set_accept_drop_fn presumably returns None, so this
        # overwrites self.fileUrl with None — confirm; the real path is
        # stored on MyExtension.fileUrl by drop() instead.
        self.fileUrl = stack.set_accept_drop_fn(lambda d, e=ext: MyExtension.drop_accept(d, e))
        stack.set_drop_fn(lambda a, w=text: MyExtension.drop(w, a))
    def createMesh(usd_context, stage, meshName):
        """Reference the dropped .obj file into the stage and return its mesh prim.

        No `self` parameter: called through the class.  Uses the path last
        stored in MyExtension.fileUrl by the drop handler.
        """
        commands.execute('CreateReferenceCommand',
                         usd_context=usd_context,
                         path_to='/World/' + meshName,
                         asset_path=MyExtension.fileUrl,
                         instanceable=True)
        # the reference nests the mesh three levels deep under its own name
        prim = stage.GetPrimAtPath('/World/' + meshName + '/' + meshName + '/' + meshName)
        return prim
def addAttributes(stage, prim, node, elem, face, edge, normals, colors, meshName):
numberOfTris = int(face.shape[0] / 3)
faceCount = np.full((numberOfTris), 3)
mesh = pxr.PhysicsSchemaTools.createMesh(stage,
pxr.Sdf.Path('/World/' + meshName + 'Mesh'),
node.tolist(),
normals.tolist(),
face.tolist(),
faceCount.tolist())
newPrim = stage.GetPrimAtPath('/World/' + meshName + 'Mesh')
velocitiesNP = np.zeros_like(node)
inverseMasses = np.ones(len(node), dtype=float)
edgesRestLengths = np.zeros(len(edge), dtype=float)
tetrahedronsRestVolumes = np.zeros(len(elem), dtype=float)
for i in range(len(edge)):
edgesRestLengths[i] = np.linalg.norm(node[edge[i][0]] - node[edge[i][1]])
for i in range(len(elem)):
tetrahedronPositionA = node[elem[i][0]]
tetrahedronPositionB = node[elem[i][1]]
tetrahedronPositionC = node[elem[i][2]]
tetrahedronPositionD = node[elem[i][3]]
p1 = tetrahedronPositionB - tetrahedronPositionA
p2 = tetrahedronPositionC - tetrahedronPositionA
p3 = tetrahedronPositionD - tetrahedronPositionA
volume = wp.dot(wp.cross(p1, p2), p3) / 6.0
tetrahedronsRestVolumes[i] = volume
velocitiesValue = pxr.Vt.Vec3fArray().FromNumpy(velocitiesNP)
elemValue = pxr.Vt.Vec4iArray().FromNumpy(elem)
edgeValue = pxr.Vt.Vec2iArray().FromNumpy(edge)
edgesRestLengthsValue = pxr.Vt.FloatArray().FromNumpy(edgesRestLengths)
inverseMassesValue = pxr.Vt.FloatArray().FromNumpy(inverseMasses)
tetrahedronsRestVolumesValue = pxr.Vt.FloatArray().FromNumpy(tetrahedronsRestVolumes)
elemAtt = newPrim.CreateAttribute('elem', Sdf.ValueTypeNames.Int4Array)
edgeAtt = newPrim.CreateAttribute('edge', Sdf.ValueTypeNames.Int2Array)
edgesRestLengthsAtt = newPrim.CreateAttribute('edgesRestLengths', Sdf.ValueTypeNames.FloatArray)
inverseMassesAtt = newPrim.CreateAttribute('inverseMasses', Sdf.ValueTypeNames.FloatArray)
tetrahedronsRestVolumesAtt = newPrim.CreateAttribute('tetrahedronsRestVolumes', Sdf.ValueTypeNames.FloatArray)
velocitiesAtt = newPrim.GetAttribute('velocities')
velocitiesAtt.Set(velocitiesValue)
elemAtt.Set(elemValue)
edgeAtt.Set(edgeValue)
edgesRestLengthsAtt.Set(edgesRestLengthsValue)
inverseMassesAtt.Set(inverseMassesValue)
tetrahedronsRestVolumesAtt.Set(tetrahedronsRestVolumesValue)
return mesh, newPrim
def extractMeshDataToNP(prim):
points = prim.GetAttribute('points').Get()
faces = prim.GetAttribute('faceVertexIndices').Get()
pointsNP = np.array(points, dtype=float)
facesNP = np.array(faces, dtype=int)
facesNP = facesNP.reshape((-1, 3))
return pointsNP, facesNP
def setPLC(self, value):
self.PLC = value
def setQuality(self, value):
self.Quality = value
def cross(a, b):
c = [a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0]]
return c
def calculateNormals(node, face):
numberOfTris = int(face.shape[0] / 3)
normals = np.empty_like(node)
for i in range(numberOfTris):
pIdA = face[i][0]
pIdB = face[i][1]
pIdC = face[i][2]
pA = node[pIdA]
pB = node[pIdB]
pC = node[pIdC]
vA = pB - pA
vB = pC - pA
normal = MyExtension.cross(vA, vB)
normalized = np.linalg.norm(normal)
normals[pIdA] += normalized
normals[pIdB] += normalized
normals[pIdC] += normalized
return normals
def on_startup(self, ext_id):
print("[mnresearch.tetgen] MyExtension startup")
self._window = ui.Window("Tetrahedralizer", width=300, height=300)
with self._window.frame:
self.PLC = False
self.Quality = False
with ui.VStack():
MyExtension.drop_area(self, ".obj")
with ui.HStack():
ui.Label("PLC", height=0)
plcCB = ui.CheckBox(width=20)
plcCB.model.add_value_changed_fn(
lambda a: MyExtension.setPLC(self, a.get_value_as_bool()))
with ui.HStack():
ui.Label("Quality", height=0)
qualityCB = ui.CheckBox(width=20)
qualityCB.model.add_value_changed_fn(
lambda a: MyExtension.setQuality(self, a.get_value_as_bool()))
def on_click():
print("clicked!")
self.usd_context = omni.usd.get_context()
self.stage = self.usd_context.get_stage()
if MyExtension.fileUrl != "":
meshName = MyExtension.fileUrl.split(os.sep)[-1][:-4]
prim = MyExtension.createMesh(self.usd_context, self.stage, meshName)
points, faces = MyExtension.extractMeshDataToNP(prim)
tet = tetgenExt.TetGen(points, faces)
print('Running tetGen on: ', MyExtension.fileUrl,
'\nwith options:',
'PLC: ', self.PLC,
'\nQuality: ', self.Quality)
node, elem, face, edge = tet.tetrahedralize(quality=True,
plc=True,
facesout=1,
edgesout=1)
normals = MyExtension.calculateNormals(node, face)
colors = np.ones_like(normals)
face = face.ravel()
mesh, newPrim = MyExtension.addAttributes(self.stage,
prim,
node,
elem,
face,
edge,
normals,
colors,
meshName)
pxr.Usd.Stage.RemovePrim(self.stage, '/World/' + meshName)
ui.Button("Generate tetrahedral mesh", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[mnresearch.tetgen] MyExtension shutdown")
| 8,644 | Python | 38.474886 | 118 | 0.518047 |
mnaskret/omni-tetGen/mnresearch/tetgen/PBDBasicGravityDatabase.py | """Support for simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity
PBDBasicGravity
"""
import omni.graph.core as og
import traceback
import sys
import numpy
# NOTE(review): this class appears to be auto-generated by the OmniGraph .ogn
# code generator (see GENERATOR_VERSION/TARGET_VERSION below) -- prefer
# regenerating from the node description over editing it by hand.
class PBDBasicGravityDatabase(og.Database):
    """Helper class providing simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity
    Class Members:
        node: Node being evaluated
    Attribute Value Properties:
        Inputs:
            inputs.edge
            inputs.edgesRestLengths
            inputs.elem
            inputs.gravity
            inputs.ground
            inputs.inverseMasses
            inputs.ks_distance
            inputs.ks_volume
            inputs.num_substeps
            inputs.points
            inputs.sim_constraints
            inputs.tetrahedronsRestVolumes
            inputs.velocities
            inputs.velocity_dampening
        Outputs:
            outputs.points
            outputs.velocities
    """
    # This is an internal object that provides per-class storage of a per-node data dictionary
    PER_NODE_DATA = {}
    # This is an internal object that describes unchanging attributes in a generic way
    # The values in this list are in no particular order, as a per-attribute tuple
    # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, Is_Required, DefaultValue
    # You should not need to access any of this data directly, use the defined database interfaces
    INTERFACE = og.Database._get_interface([
        ('inputs:edge', 'int2[]', 0, None, 'Input edges', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:edgesRestLengths', 'float[]', 0, None, 'Input edges rest lengths', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:elem', 'int4[]', 0, None, 'Input tetrahedrons', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:gravity', 'vector3f', 0, None, 'Gravity constant', {og.MetadataKeys.DEFAULT: '[0.0, -9.8, 0.0]'}, True, [0.0, -9.8, 0.0]),
        ('inputs:ground', 'float', 0, None, 'Ground level', {og.MetadataKeys.DEFAULT: '-100.0'}, True, -100.0),
        ('inputs:inverseMasses', 'float[]', 0, None, 'Inverse masses', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:ks_distance', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),
        ('inputs:ks_volume', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),
        ('inputs:num_substeps', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '8'}, True, 8),
        ('inputs:points', 'point3f[]', 0, None, 'Input points', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:sim_constraints', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '1'}, True, 1),
        ('inputs:tetrahedronsRestVolumes', 'float[]', 0, None, 'Input tetrahedrons rest volumes', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:velocities', 'vector3f[]', 0, None, 'Input velocities', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:velocity_dampening', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '0.1'}, True, 0.1),
        ('outputs:points', 'point3f[]', 0, None, 'Output points', {}, True, None),
        ('outputs:velocities', 'vector3f[]', 0, None, 'Output velocities', {}, True, None),
    ])
    @classmethod
    def _populate_role_data(cls):
        """Populate a role structure with the non-default roles on this node type"""
        role_data = super()._populate_role_data()
        role_data.inputs.gravity = og.Database.ROLE_VECTOR
        role_data.inputs.points = og.Database.ROLE_POINT
        role_data.inputs.velocities = og.Database.ROLE_VECTOR
        role_data.outputs.points = og.Database.ROLE_POINT
        role_data.outputs.velocities = og.Database.ROLE_VECTOR
        return role_data
    class ValuesForInputs(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to input attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
        # Accessor pattern below: getters read through AttributeValueHelper;
        # setters refuse writes while compute() holds the lock
        # (_setting_locked, see abi.compute) and, for array attributes,
        # record the new array size.
        @property
        def edge(self):
            data_view = og.AttributeValueHelper(self._attributes.edge)
            return data_view.get()
        @edge.setter
        def edge(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.edge)
            data_view = og.AttributeValueHelper(self._attributes.edge)
            data_view.set(value)
            self.edge_size = data_view.get_array_size()
        @property
        def edgesRestLengths(self):
            data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)
            return data_view.get()
        @edgesRestLengths.setter
        def edgesRestLengths(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.edgesRestLengths)
            data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)
            data_view.set(value)
            self.edgesRestLengths_size = data_view.get_array_size()
        @property
        def elem(self):
            data_view = og.AttributeValueHelper(self._attributes.elem)
            return data_view.get()
        @elem.setter
        def elem(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.elem)
            data_view = og.AttributeValueHelper(self._attributes.elem)
            data_view.set(value)
            self.elem_size = data_view.get_array_size()
        @property
        def gravity(self):
            data_view = og.AttributeValueHelper(self._attributes.gravity)
            return data_view.get()
        @gravity.setter
        def gravity(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.gravity)
            data_view = og.AttributeValueHelper(self._attributes.gravity)
            data_view.set(value)
        @property
        def ground(self):
            data_view = og.AttributeValueHelper(self._attributes.ground)
            return data_view.get()
        @ground.setter
        def ground(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.ground)
            data_view = og.AttributeValueHelper(self._attributes.ground)
            data_view.set(value)
        @property
        def inverseMasses(self):
            data_view = og.AttributeValueHelper(self._attributes.inverseMasses)
            return data_view.get()
        @inverseMasses.setter
        def inverseMasses(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.inverseMasses)
            data_view = og.AttributeValueHelper(self._attributes.inverseMasses)
            data_view.set(value)
            self.inverseMasses_size = data_view.get_array_size()
        @property
        def ks_distance(self):
            data_view = og.AttributeValueHelper(self._attributes.ks_distance)
            return data_view.get()
        @ks_distance.setter
        def ks_distance(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.ks_distance)
            data_view = og.AttributeValueHelper(self._attributes.ks_distance)
            data_view.set(value)
        @property
        def ks_volume(self):
            data_view = og.AttributeValueHelper(self._attributes.ks_volume)
            return data_view.get()
        @ks_volume.setter
        def ks_volume(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.ks_volume)
            data_view = og.AttributeValueHelper(self._attributes.ks_volume)
            data_view.set(value)
        @property
        def num_substeps(self):
            data_view = og.AttributeValueHelper(self._attributes.num_substeps)
            return data_view.get()
        @num_substeps.setter
        def num_substeps(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.num_substeps)
            data_view = og.AttributeValueHelper(self._attributes.num_substeps)
            data_view.set(value)
        @property
        def points(self):
            data_view = og.AttributeValueHelper(self._attributes.points)
            return data_view.get()
        @points.setter
        def points(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.points)
            data_view = og.AttributeValueHelper(self._attributes.points)
            data_view.set(value)
            self.points_size = data_view.get_array_size()
        @property
        def sim_constraints(self):
            data_view = og.AttributeValueHelper(self._attributes.sim_constraints)
            return data_view.get()
        @sim_constraints.setter
        def sim_constraints(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.sim_constraints)
            data_view = og.AttributeValueHelper(self._attributes.sim_constraints)
            data_view.set(value)
        @property
        def tetrahedronsRestVolumes(self):
            data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)
            return data_view.get()
        @tetrahedronsRestVolumes.setter
        def tetrahedronsRestVolumes(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.tetrahedronsRestVolumes)
            data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)
            data_view.set(value)
            self.tetrahedronsRestVolumes_size = data_view.get_array_size()
        @property
        def velocities(self):
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            return data_view.get()
        @velocities.setter
        def velocities(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.velocities)
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            data_view.set(value)
            self.velocities_size = data_view.get_array_size()
        @property
        def velocity_dampening(self):
            data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)
            return data_view.get()
        @velocity_dampening.setter
        def velocity_dampening(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.velocity_dampening)
            data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)
            data_view.set(value)
    class ValuesForOutputs(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to output attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
            # Cached array sizes used to pre-reserve output buffers on read.
            self.points_size = None
            self.velocities_size = None
        @property
        def points(self):
            data_view = og.AttributeValueHelper(self._attributes.points)
            return data_view.get(reserved_element_count = self.points_size)
        @points.setter
        def points(self, value):
            data_view = og.AttributeValueHelper(self._attributes.points)
            data_view.set(value)
            self.points_size = data_view.get_array_size()
        @property
        def velocities(self):
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            return data_view.get(reserved_element_count = self.velocities_size)
        @velocities.setter
        def velocities(self, value):
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            data_view.set(value)
            self.velocities_size = data_view.get_array_size()
    class ValuesForState(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to state attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
    def __init__(self, node):
        # Build the inputs/outputs/state accessor objects for this node.
        super().__init__(node)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
        self.inputs = PBDBasicGravityDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
        self.outputs = PBDBasicGravityDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
        self.state = PBDBasicGravityDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
    class abi:
        """Class defining the ABI interface for the node type"""
        @staticmethod
        def get_node_type():
            get_node_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
            if callable(get_node_type_function):
                return get_node_type_function()
            return 'mnresearch.tetgen.PBDBasicGravity'
        @staticmethod
        def compute(context, node):
            db = PBDBasicGravityDatabase(node)
            try:
                # Inputs are read-only while the user compute() runs.
                db.inputs._setting_locked = True
                compute_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'compute', None)
                if callable(compute_function) and compute_function.__code__.co_argcount > 1:
                    return compute_function(context, node)
                return PBDBasicGravityDatabase.NODE_TYPE_CLASS.compute(db)
            except Exception as error:
                stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
                db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
            finally:
                db.inputs._setting_locked = False
            return False
        @staticmethod
        def initialize(context, node):
            PBDBasicGravityDatabase._initialize_per_node_data(node)
            # Set any default values the attributes have specified
            if not node._do_not_use():
                db = PBDBasicGravityDatabase(node)
                db.inputs.edge = []
                db.inputs.edgesRestLengths = []
                db.inputs.elem = []
                db.inputs.gravity = [0.0, -9.8, 0.0]
                db.inputs.ground = -100.0
                db.inputs.inverseMasses = []
                db.inputs.ks_distance = 1.0
                db.inputs.ks_volume = 1.0
                db.inputs.num_substeps = 8
                db.inputs.points = []
                db.inputs.sim_constraints = 1
                db.inputs.tetrahedronsRestVolumes = []
                db.inputs.velocities = []
                db.inputs.velocity_dampening = 0.1
            initialize_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize', None)
            if callable(initialize_function):
                initialize_function(context, node)
        @staticmethod
        def release(node):
            release_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'release', None)
            if callable(release_function):
                release_function(node)
            PBDBasicGravityDatabase._release_per_node_data(node)
        @staticmethod
        def update_node_version(context, node, old_version, new_version):
            update_node_version_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
            if callable(update_node_version_function):
                return update_node_version_function(context, node, old_version, new_version)
            return False
        @staticmethod
        def initialize_type(node_type):
            initialize_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
            needs_initializing = True
            if callable(initialize_type_function):
                needs_initializing = initialize_type_function(node_type)
            if needs_initializing:
                node_type.set_metadata(og.MetadataKeys.EXTENSION, "mnresearch.tetgen")
                node_type.set_metadata(og.MetadataKeys.UI_NAME, "PBDBasicGravity")
                node_type.set_metadata(og.MetadataKeys.DESCRIPTION, "PBDBasicGravity")
                node_type.set_metadata(og.MetadataKeys.LANGUAGE, "Python")
                PBDBasicGravityDatabase.INTERFACE.add_to_node_type(node_type)
        @staticmethod
        def on_connection_type_resolve(node):
            on_connection_type_resolve_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
            if callable(on_connection_type_resolve_function):
                on_connection_type_resolve_function(node)
    # Set by register(): the user-written node class implementing compute().
    NODE_TYPE_CLASS = None
    # Versions recorded by the .ogn code generator at generation time.
    GENERATOR_VERSION = (1, 4, 0)
    TARGET_VERSION = (2, 29, 1)
    @staticmethod
    def register(node_type_class):
        PBDBasicGravityDatabase.NODE_TYPE_CLASS = node_type_class
        og.register_node_type(PBDBasicGravityDatabase.abi, 1)
    @staticmethod
    def deregister():
        og.deregister_node_type("mnresearch.tetgen.PBDBasicGravity")
| 17,984 | Python | 46.204724 | 141 | 0.62411 |
mnaskret/omni-tetGen/mnresearch/tetgen/ogn/nodes/PBDBasicGravity.py | """
This is the implementation of the OGN node defined in OgnNewNode.ogn
"""
# Array or tuple values are accessed as numpy arrays so you probably need this import
import math
import numpy as np
import warp as wp
import omni.timeline
from pxr import Usd, UsdGeom, Gf, Sdf
@wp.kernel
def boundsKer(predictedPositions: wp.array(dtype=wp.vec3),
              groundLevel: float):
    # Ground-collision constraint: clamp each predicted position so that
    # its y component never drops below groundLevel.
    tid = wp.tid()
    x = predictedPositions[tid]
    if(x[1] < groundLevel):
        predictedPositions[tid] = wp.vec3(x[0], groundLevel, x[2])
@wp.kernel
def PBDStepKer(positions: wp.array(dtype=wp.vec3),
               predictedPositions: wp.array(dtype=wp.vec3),
               velocities: wp.array(dtype=wp.vec3),
               dT: float):
    # Final PBD integration step: derive the new velocity from how far the
    # constraint projection moved the point, then commit the predicted
    # position as the new position.
    tid = wp.tid()
    x = positions[tid]
    xPred = predictedPositions[tid]
    # v = (x_pred - x) / dT  (finite-difference velocity update)
    v = (xPred - x)*(1.0/dT)
    x = xPred
    positions[tid] = x
    velocities[tid] = v
@wp.kernel
def gravityKer(positions: wp.array(dtype=wp.vec3),
               predictedPositions: wp.array(dtype=wp.vec3),
               velocities: wp.array(dtype=wp.vec3),
               gravityConstant: wp.vec3,
               velocityDampening: float,
               dt: float):
    # External-force step of position-based dynamics: integrate gravity into
    # the velocity, then predict the next position explicitly.
    tid = wp.tid()
    x = positions[tid]
    v = velocities[tid]
    velocityDampening = 1.0 - velocityDampening
    # NOTE(review): the dampening factor scales only the gravity increment,
    # not the accumulated velocity -- confirm this is the intended model.
    v = v + gravityConstant*dt*velocityDampening
    xPred = x + v*dt
    predictedPositions[tid] = xPred
@wp.kernel
def distanceConstraints(predictedPositions: wp.array(dtype=wp.vec3),
                        dP: wp.array(dtype=wp.vec3),
                        constraintsNumber: wp.array(dtype=int),
                        edgesA: wp.array(dtype=int),
                        edgesB: wp.array(dtype=int),
                        edgesRestLengths: wp.array(dtype=float),
                        inverseMasses: wp.array(dtype=float),
                        kS: float):
    # Distance (stretch) constraint: accumulate position corrections that
    # pull each edge back towards its rest length.  Corrections are summed
    # into dP and averaged/applied later by applyConstraints.
    tid = wp.tid()
    edgeIndexA = edgesA[tid]
    edgeIndexB = edgesB[tid]
    edgePositionA = predictedPositions[edgeIndexA]
    edgePositionB = predictedPositions[edgeIndexB]
    edgeRestLength = edgesRestLengths[tid]
    dir = edgePositionA - edgePositionB
    len = wp.length(dir)
    # Combined inverse mass of both endpoints weights the correction.
    inverseMass = inverseMasses[edgeIndexA] + inverseMasses[edgeIndexB]
    # NOTE(review): wp.normalize(dir) divides by |dir|; a degenerate
    # zero-length edge would yield a non-finite correction -- confirm inputs.
    edgeDP = (len-edgeRestLength) * wp.normalize(dir) * kS / inverseMass
    # Atomic accumulation: several edges may touch the same vertex.
    wp.atomic_sub(dP, edgeIndexA, edgeDP)
    wp.atomic_add(dP, edgeIndexB, edgeDP)
    wp.atomic_add(constraintsNumber, edgeIndexA, 1)
    wp.atomic_add(constraintsNumber, edgeIndexB, 1)
@wp.kernel
def volumeConstraints(predictedPositions: wp.array(dtype=wp.vec3),
                      dP: wp.array(dtype=wp.vec3),
                      constraintsNumber: wp.array(dtype=int),
                      tetrahedronsA: wp.array(dtype=int),
                      tetrahedronsB: wp.array(dtype=int),
                      tetrahedronsC: wp.array(dtype=int),
                      tetrahedronsD: wp.array(dtype=int),
                      tetrahedronsRestVolumes: wp.array(dtype=float),
                      inverseMasses: wp.array(dtype=float),
                      kS: float):
    # Volume-preservation constraint: for each tetrahedron, accumulate
    # corrections that push its current signed volume back towards the rest
    # volume.  Corrections are summed into dP and applied by applyConstraints.
    tid = wp.tid()
    tetrahedronIndexA = tetrahedronsA[tid]
    tetrahedronIndexB = tetrahedronsB[tid]
    tetrahedronIndexC = tetrahedronsC[tid]
    tetrahedronIndexD = tetrahedronsD[tid]
    tetrahedronPositionA = predictedPositions[tetrahedronIndexA]
    tetrahedronPositionB = predictedPositions[tetrahedronIndexB]
    tetrahedronPositionC = predictedPositions[tetrahedronIndexC]
    tetrahedronPositionD = predictedPositions[tetrahedronIndexD]
    tetrahedronRestVolume = tetrahedronsRestVolumes[tid]
    # Edge vectors from corner A.
    p1 = tetrahedronPositionB - tetrahedronPositionA
    p2 = tetrahedronPositionC - tetrahedronPositionA
    p3 = tetrahedronPositionD - tetrahedronPositionA
    # q1..q3 are the volume gradients w.r.t. corners B, C, D; q0 balances
    # them so the corrections conserve momentum.
    q2 = wp.cross(p3, p1)
    q1 = wp.cross(p2, p3)
    q3 = wp.cross(p1, p2)
    q0 = - q1 - q2 - q3
    mA = inverseMasses[tetrahedronIndexA]
    mB = inverseMasses[tetrahedronIndexB]
    mC = inverseMasses[tetrahedronIndexC]
    mD = inverseMasses[tetrahedronIndexD]
    # Signed volume via the scalar triple product.
    volume = wp.dot(wp.cross(p1, p2), p3) / 6.0
    # Lagrange-multiplier style scaling of the constraint violation.
    lambd = mA * wp.dot(q0, q0) + mB * wp.dot(q1, q1) + mC * wp.dot(q2, q2) + mD * wp.dot(q3, q3)
    lambd = kS * (volume - tetrahedronRestVolume) / lambd
    wp.atomic_sub(dP, tetrahedronIndexA, q0 * lambd * mA)
    wp.atomic_sub(dP, tetrahedronIndexB, q1 * lambd * mB)
    wp.atomic_sub(dP, tetrahedronIndexC, q2 * lambd * mC)
    wp.atomic_sub(dP, tetrahedronIndexD, q3 * lambd * mD)
    wp.atomic_add(constraintsNumber, tetrahedronIndexA, 1)
    wp.atomic_add(constraintsNumber, tetrahedronIndexB, 1)
    wp.atomic_add(constraintsNumber, tetrahedronIndexC, 1)
    wp.atomic_add(constraintsNumber, tetrahedronIndexD, 1)
@wp.kernel
def applyConstraints(predictedPositions: wp.array(dtype=wp.vec3),
                     dP: wp.array(dtype=wp.vec3),
                     constraintsNumber: wp.array(dtype=int)):
    # Apply the accumulated corrections: average dP over the number of
    # constraints that touched each vertex, add it to the predicted
    # position, then reset both accumulators for the next iteration.
    tid = wp.tid()
    if(constraintsNumber[tid] > 0):
        tmpDP = dP[tid]
        N = float(constraintsNumber[tid])
        DP = wp.vec3(tmpDP[0]/N, tmpDP[1]/N, tmpDP[2]/N)
        predictedPositions[tid] = predictedPositions[tid] + DP
    # Reset happens unconditionally so untouched vertices stay clean too.
    dP[tid] = wp.vec3(0.0, 0.0, 0.0)
    constraintsNumber[tid] = 0
class PBDBasicGravity:
    """OmniGraph node implementation: runs one PBD simulation frame per
    graph evaluation while the timeline is playing; otherwise passes the
    input points/velocities through unchanged."""
    @staticmethod
    def compute(db) -> bool:
        # NOTE(review): despite the -> bool annotation, no branch returns a
        # value explicitly (implicitly returns None) -- confirm whether the
        # ABI wrapper expects an explicit True on success.
        timeline = omni.timeline.get_timeline_interface()
        device = "cuda"
        # # reset on stop
        # if (timeline.is_stopped()):
        #     context.reset()
        # initialization
        if (timeline.is_playing()):
            with wp.ScopedCudaGuard():
                gravity = db.inputs.gravity
                velocity_dampening = db.inputs.velocity_dampening
                ground = db.inputs.ground
                kSDistance = db.inputs.ks_distance
                kSVolume = db.inputs.ks_volume
                # convert node inputs to a GPU array
                positions = wp.array(db.inputs.points, dtype=wp.vec3, device=device)
                predictedPositions = wp.zeros_like(positions)
                velocities = wp.array(db.inputs.velocities, dtype=wp.vec3, device=device)
                inverseMasses = wp.array(db.inputs.inverseMasses, dtype=float, device=device)
                # Per-vertex correction accumulator and constraint counter.
                dP = wp.zeros_like(positions)
                constraintsNumber = wp.zeros(len(dP), dtype=int, device=device)
                # Split the (n, 2) edge array into per-endpoint index arrays.
                edgesSplit = np.hsplit(db.inputs.edge, 2)
                edgesA = wp.array(edgesSplit[0], dtype=int, device=device)
                edgesB = wp.array(edgesSplit[1], dtype=int, device=device)
                edgesRestLengths = wp.array(db.inputs.edgesRestLengths, dtype=float, device=device)
                # Split the (n, 4) tetrahedron array into per-corner index arrays.
                tetrahedronsSplit = np.hsplit(db.inputs.elem, 4)
                tetrahedronsA = wp.array(tetrahedronsSplit[0], dtype=int, device=device)
                tetrahedronsB = wp.array(tetrahedronsSplit[1], dtype=int, device=device)
                tetrahedronsC = wp.array(tetrahedronsSplit[2], dtype=int, device=device)
                tetrahedronsD = wp.array(tetrahedronsSplit[3], dtype=int, device=device)
                tetrahedronsRestVolumes = wp.array(db.inputs.tetrahedronsRestVolumes, dtype=float, device=device)
                # step simulation
                with wp.ScopedTimer("Simulate", active=False):
                    # simulate
                    sim_substeps = db.inputs.num_substeps
                    sim_constraints = db.inputs.sim_constraints
                    # assumes a fixed 30 FPS timeline -- TODO confirm
                    sim_dt = (1.0/30)/sim_substeps
                    for i in range(sim_substeps):
                        # simulate: predict positions under gravity ...
                        wp.launch(kernel=gravityKer,
                                  dim=len(positions),
                                  inputs=[positions,
                                          predictedPositions,
                                          velocities,
                                          gravity,
                                          velocity_dampening,
                                          sim_dt],
                                  device=device)
                        # ... then iteratively project volume + distance
                        # constraints, applying averaged corrections each pass.
                        for j in range(sim_constraints):
                            wp.launch(
                                kernel=volumeConstraints,
                                dim=len(tetrahedronsA),
                                inputs=[predictedPositions,
                                        dP,
                                        constraintsNumber,
                                        tetrahedronsA,
                                        tetrahedronsB,
                                        tetrahedronsC,
                                        tetrahedronsD,
                                        tetrahedronsRestVolumes,
                                        inverseMasses,
                                        kSVolume],
                                device=device)
                            wp.launch(
                                kernel=distanceConstraints,
                                dim=len(edgesA),
                                inputs=[predictedPositions,
                                        dP,
                                        constraintsNumber,
                                        edgesA,
                                        edgesB,
                                        edgesRestLengths,
                                        inverseMasses,
                                        kSDistance],
                                device=device)
                            wp.launch(
                                kernel=applyConstraints,
                                dim=len(positions),
                                inputs=[predictedPositions,
                                        dP,
                                        constraintsNumber],
                                device=device)
                        # Ground clamp, then commit positions + velocities.
                        wp.launch(kernel=boundsKer,
                                  dim=len(predictedPositions),
                                  inputs=[predictedPositions,
                                          ground],
                                  device=device)
                        wp.launch(kernel=PBDStepKer,
                                  dim=len(positions),
                                  inputs=[positions,
                                          predictedPositions,
                                          velocities,
                                          sim_dt],
                                  device=device)
                # write node outputs
                db.outputs.points = positions.numpy()
                db.outputs.velocities = velocities.numpy()
        else:
            with wp.ScopedTimer("Write", active=False):
                # timeline not playing and sim. not yet initialized, just pass through outputs
                db.outputs.points = db.inputs.points
                db.outputs.velocities = db.inputs.velocities