file_path
stringlengths 22
162
| content
stringlengths 19
501k
| size
int64 19
501k
| lang
stringclasses 1
value | avg_line_length
float64 6.33
100
| max_line_length
int64 18
935
| alphanum_fraction
float64 0.34
0.93
|
---|---|---|---|---|---|---|
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/tests/raster_overlay_to_add_test.py | import carb.events
import omni.kit.test
from unittest.mock import MagicMock
from cesium.omniverse.models.raster_overlay_to_add import RasterOverlayToAdd
# Fixture values shared by all tests in this module.
TILESET_PATH = "/fake/tileset/path"  # USD prim path of the parent tileset
RASTER_OVERLAY_NAME = "fake_raster_overlay_name"
RASTER_OVERLAY_ION_ASSET_ID = 2
# Event payload in the shape RasterOverlayToAdd.from_event() expects.
PAYLOAD_DICT = {
    "tileset_path": TILESET_PATH,
    "raster_overlay_name": RASTER_OVERLAY_NAME,
    "raster_overlay_ion_asset_id": RASTER_OVERLAY_ION_ASSET_ID,
}
class RasterOverlayToAddTest(omni.kit.test.AsyncTestCase):
    """Unit tests for RasterOverlayToAdd dict serialization and event parsing."""

    async def setUp(self):
        pass

    async def tearDown(self):
        pass

    async def test_convert_raster_overlay_to_add_to_dict(self):
        """A populated RasterOverlayToAdd round-trips its fields through to_dict()."""
        overlay = RasterOverlayToAdd(
            tileset_path=TILESET_PATH,
            raster_overlay_ion_asset_id=RASTER_OVERLAY_ION_ASSET_ID,
            raster_overlay_name=RASTER_OVERLAY_NAME,
        )
        as_dict = overlay.to_dict()
        for key, expected in PAYLOAD_DICT.items():
            self.assertEqual(as_dict[key], expected)

    async def test_create_raster_overlay_to_add_from_event(self):
        """from_event() reads every field out of the event payload."""
        event = MagicMock(spec=carb.events.IEvent)
        event.payload = PAYLOAD_DICT
        overlay = RasterOverlayToAdd.from_event(event)
        self.assertIsNotNone(overlay)
        self.assertEqual(overlay.tileset_path, TILESET_PATH)
        self.assertEqual(overlay.raster_overlay_name, RASTER_OVERLAY_NAME)
        self.assertEqual(overlay.raster_overlay_ion_asset_id, RASTER_OVERLAY_ION_ASSET_ID)

    async def test_create_raster_overlay_to_add_from_empty_event(self):
        """from_event() returns None when the event carries no payload."""
        event = MagicMock(spec=carb.events.IEvent)
        event.payload = None
        self.assertIsNone(RasterOverlayToAdd.from_event(event))
| 1,998 | Python | 39.795918 | 104 | 0.707708 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/tests/__init__.py | from .asset_to_add_test import AssetToAddTest # noqa: F401
from .raster_overlay_to_add_test import RasterOverlayToAddTest # noqa: F401
| 137 | Python | 44.999985 | 76 | 0.788321 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/models/tests/asset_to_add_test.py | import carb.events
import omni.kit.test
from unittest.mock import MagicMock
from cesium.omniverse.models.asset_to_add import AssetToAdd
# Fixture values shared by all tests in this module.
TILESET_NAME = "fake_tileset_name"
TILESET_ION_ASSET_ID = 1
RASTER_OVERLAY_NAME = "fake_raster_overlay_name"
RASTER_OVERLAY_ION_ASSET_ID = 2
# Event payload in the shape AssetToAdd.from_event() expects.
PAYLOAD_DICT = {
    "tileset_name": TILESET_NAME,
    "tileset_ion_asset_id": TILESET_ION_ASSET_ID,
    "raster_overlay_name": RASTER_OVERLAY_NAME,
    "raster_overlay_ion_asset_id": RASTER_OVERLAY_ION_ASSET_ID,
}
class AssetToAddTest(omni.kit.test.AsyncTestCase):
    """Unit tests for AssetToAdd dict serialization and event parsing."""

    async def setUp(self):
        pass

    async def tearDown(self):
        pass

    async def test_convert_asset_to_add_to_dict(self):
        """A populated AssetToAdd round-trips its fields through to_dict()."""
        asset = AssetToAdd(
            tileset_name=TILESET_NAME,
            tileset_ion_asset_id=TILESET_ION_ASSET_ID,
            raster_overlay_name=RASTER_OVERLAY_NAME,
            raster_overlay_ion_asset_id=RASTER_OVERLAY_ION_ASSET_ID,
        )
        as_dict = asset.to_dict()
        for key, expected in PAYLOAD_DICT.items():
            self.assertEqual(as_dict[key], expected)

    async def test_create_asset_to_add_from_event(self):
        """from_event() reads every field out of the event payload."""
        event = MagicMock(spec=carb.events.IEvent)
        event.payload = PAYLOAD_DICT
        asset = AssetToAdd.from_event(event)
        self.assertIsNotNone(asset)
        self.assertEqual(asset.tileset_name, TILESET_NAME)
        self.assertEqual(asset.tileset_ion_asset_id, TILESET_ION_ASSET_ID)
        self.assertEqual(asset.raster_overlay_name, RASTER_OVERLAY_NAME)
        self.assertEqual(asset.raster_overlay_ion_asset_id, RASTER_OVERLAY_ION_ASSET_ID)

    async def test_create_asset_to_add_from_empty_event(self):
        """from_event() returns None when the event carries no payload."""
        event = MagicMock(spec=carb.events.IEvent)
        event.payload = None
        self.assertIsNone(AssetToAdd.from_event(event))
| 2,131 | Python | 38.481481 | 95 | 0.689817 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/tests/__init__.py | # For Python testing within Omniverse, it only looks in the `.tests` submodule in whatever is defined
# as an extensions Python module. For organization purposes, we then import all of our tests from our other
# testing submodules.
from .extension_test import * # noqa: F401 F403
from ..models.tests import * # noqa: F401 F403
from ..ui.tests import * # noqa: F401 F403
from ..ui.models.tests import * # noqa: F401 F403
| 424 | Python | 52.124993 | 107 | 0.742925 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/tests/utils.py | from pathlib import Path
import omni.kit.app
def get_golden_img_dir():
    """Return the golden-image directory used by the pass/fail widget UI tests."""
    ext_manager = omni.kit.app.get_app().get_extension_manager()
    extension_id = ext_manager.get_extension_id_by_module("cesium.omniverse")
    extension_root = Path(ext_manager.get_extension_path(extension_id))
    return extension_root.joinpath("images/tests/ui/pass_fail_widget")
async def wait_for_update(wait_frames=10):
    """Yield to the Kit update loop for ``wait_frames`` frames."""
    frames_remaining = wait_frames
    while frames_remaining > 0:
        await omni.kit.app.get_app().next_update_async()
        frames_remaining -= 1
| 434 | Python | 30.071426 | 96 | 0.709677 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/tests/extension_test.py | import omni.kit.test
import omni.kit.ui_test as ui_test
import omni.usd
import pxr.Usd
import cesium.usd
from typing import Optional
# Shared handle to the "Cesium" window, looked up once per test in setUp().
_window_ref: Optional[ui_test.WidgetRef] = None
class ExtensionTest(omni.kit.test.AsyncTestCase):
    """UI smoke tests for the Cesium Omniverse extension window."""

    async def setUp(self):
        global _window_ref
        # can be removed (or at least decreased) once there is no delay
        # required before spawning the cesium window. See:
        # https://github.com/CesiumGS/cesium-omniverse/pull/423
        await ui_test.wait_n_updates(24)
        _window_ref = ui_test.find("Cesium")

    async def tearDown(self):
        pass

    async def test_cesium_window_opens(self):
        """The Cesium window should be discoverable after startup."""
        self.assertIsNotNone(_window_ref)

    async def test_window_docked(self):
        """The Cesium window should report as docked once focused."""
        # docked is false if the window is not in focus,
        # as may be the case if other extensions are loaded
        await _window_ref.focus()
        self.assertTrue(_window_ref.window.docked)

    async def test_blank_tileset(self):
        """Clicking 'Blank 3D Tiles Tileset' should add a Tileset prim to the stage."""
        # PEP 8 snake_case local (was camelCase).
        blank_tileset_button = _window_ref.find("**/Button[*].text=='Blank 3D Tiles Tileset'")
        self.assertIsNotNone(blank_tileset_button)
        stage: pxr.Usd.Stage = omni.usd.get_context().get_stage()
        self.assertIsNotNone(stage)

        def stage_has_tileset() -> bool:
            # Generator (not a materialized list) so any() can short-circuit.
            return any(prim.IsA(cesium.usd.plugins.CesiumUsdSchemas.Tileset) for prim in stage.Traverse())

        self.assertFalse(stage_has_tileset())
        await blank_tileset_button.click()
        await ui_test.wait_n_updates(2)  # passes without, but seems prudent
        self.assertTrue(stage_has_tileset())
| 1,657 | Python | 30.283018 | 109 | 0.67411 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/utils/custom_fields.py | import omni.ui as ui
READ_ONLY_STYLE = {"color": ui.color("#888888")}
def string_field_with_label(label_text, model=None, enabled=True):
    """Create a labeled ``ui.StringField`` row inside an HStack.

    Args:
        label_text: Text for the label placed to the left of the field.
        model: Optional value model to attach to the field.
        enabled: When False the field is disabled and drawn with the
            read-only style.

    Returns:
        The created ``ui.StringField``.
    """
    with ui.HStack(spacing=4, height=20):
        ui.Label(label_text, height=20, width=100)
        field = ui.StringField(height=20, enabled=enabled)
        if not enabled:
            field.style = READ_ONLY_STYLE
        # Explicit None check: a valid-but-falsy model must still be attached.
        if model is not None:
            field.model = model
        return field
def int_field_with_label(label_text, model=None, enabled=True):
    """Create a labeled ``ui.IntField`` row inside an HStack.

    Args:
        label_text: Text for the label placed to the left of the field.
        model: Optional value model to attach to the field.
        enabled: When False the field is disabled and drawn with the
            read-only style.

    Returns:
        The created ``ui.IntField``.
    """
    with ui.HStack(spacing=4, height=20):
        ui.Label(label_text, height=20, width=100)
        field = ui.IntField(height=20, enabled=enabled)
        if not enabled:
            field.style = READ_ONLY_STYLE
        # Explicit None check: a valid-but-falsy model must still be attached.
        if model is not None:
            field.model = model
        return field
def float_field_with_label(label_text, model=None, enabled=True):
    """Create a labeled ``ui.FloatField`` row (7-digit precision) inside an HStack.

    Args:
        label_text: Text for the label placed to the left of the field.
        model: Optional value model to attach to the field.
        enabled: When False the field is disabled and drawn with the
            read-only style.

    Returns:
        The created ``ui.FloatField``.
    """
    with ui.HStack(spacing=4, height=20):
        ui.Label(label_text, height=20, width=100)
        field = ui.FloatField(height=20, enabled=enabled, precision=7)
        if not enabled:
            field.style = READ_ONLY_STYLE
        # Explicit None check: a valid-but-falsy model must still be attached.
        if model is not None:
            field.model = model
        return field
| 1,150 | Python | 30.108107 | 70 | 0.616522 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/utils/utils.py | from typing import Optional, Callable
import omni.usd
import omni.kit
import omni.ui as ui
async def wait_n_frames(n: int) -> None:
    """Yield to the Kit update loop for ``n`` frames.

    Args:
        n: Number of app updates to await before returning.
    """
    # range(n) and "_" instead of the unused index variable.
    for _ in range(n):
        await omni.kit.app.get_app().next_update_async()
async def dock_window_async(
    window: Optional[ui.Window], target: str = "Stage", position: ui.DockPosition = ui.DockPosition.SAME
) -> None:
    """Dock ``window`` into the workspace window named ``target`` and focus it.

    No-op when ``window`` is None.
    """
    if window is None:
        return
    # Wait five frames so the UI can settle before docking.
    await wait_n_frames(5)
    destination = ui.Workspace.get_window(target)
    window.dock_in(destination, position, 1)
    window.focus()
async def perform_action_after_n_frames_async(n: int, action: Callable[[], None]) -> None:
    """Wait ``n`` update frames, then invoke ``action`` once."""
    await wait_n_frames(n)
    action()
def str_is_empty_or_none(s: Optional[str]) -> bool:
    """Return True when ``s`` is None or the empty string.

    Whitespace-only strings are NOT considered empty.

    Args:
        s: The string to test, or None.

    Returns:
        True for None or "", False otherwise.
    """
    # Single boolean expression replaces the original if-chain.
    return s is None or s == ""
| 848 | Python | 21.342105 | 104 | 0.646226 |
CesiumGS/cesium-omniverse/exts/cesium.omniverse/cesium/omniverse/utils/cesium_interface.py | from ..bindings import acquire_cesium_omniverse_interface
class CesiumInterfaceManager:
    """Context manager that hands out the (singleton) Cesium Omniverse interface."""

    def __init__(self):
        # The interface is a singleton, so acquiring it repeatedly is safe.
        self.interface = acquire_cesium_omniverse_interface()

    def __enter__(self):
        return self.interface

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Intentionally a no-op: the interface is released when the plugin
        # itself is pulled down, not at the end of each `with` block.
        pass
| 412 | Python | 26.533332 | 64 | 0.657767 |
CesiumGS/cesium-omniverse/apps/exts/cesium.performance.app/cesium/performance/app/extension.py | from functools import partial
import asyncio
import time
from typing import Callable, List, Optional
import logging
import carb.events
import omni.ext
import omni.ui as ui
import omni.usd
import omni.kit.app as app
import omni.kit.ui
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Sdf, UsdGeom
from .performance_window import CesiumPerformanceWindow
from .fps_sampler import FpsSampler
from cesium.omniverse.bindings import acquire_cesium_omniverse_interface, release_cesium_omniverse_interface
from cesium.omniverse.utils import wait_n_frames, dock_window_async
from cesium.usd.plugins.CesiumUsdSchemas import (
Data as CesiumData,
Georeference as CesiumGeoreference,
IonRasterOverlay as CesiumIonRasterOverlay,
Tileset as CesiumTileset,
Tokens as CesiumTokens,
)
# NOTE(review): a Cesium ion access token is committed here in plain text. It
# appears intentional for in-repo performance testing, but confirm it is a
# low-privilege, revocable token and not a secret.
ION_ACCESS_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJqdGkiOiI0Y2ZjNzY3NC04MWIyLTQyN2ItODg3Zi0zYzk3MmQxZWYxMmIiLCJpZCI6MjU5LCJpYXQiOjE3MTE5NzkyNzl9.GuvRiyuJO14zjA5_mIwgocOShmF4EUj2xbmikcCeXxs"  # noqa: E501
# Cesium ion asset id used by the "Google" scenarios — presumably Google's
# Photorealistic 3D Tiles dataset; TODO confirm against the ion asset catalog.
GOOGLE_3D_TILES_ION_ID = 2275207
# Well-known prim paths this extension reads/creates on the stage.
CESIUM_DATA_PRIM_PATH = "/Cesium"
CESIUM_GEOREFERENCE_PRIM_PATH = "/CesiumGeoreference"
CESIUM_CAMERA_PATH = "/Camera"
class CesiumPerformanceExtension(omni.ext.IExt):
    """Kit extension that runs Cesium tileset loading benchmarks.

    The performance window posts "cesium.performance.VIEW_*" events on the app
    message bus; this extension listens for them, builds the corresponding
    tileset (Cesium World Terrain + Bing imagery, or the Google 3D Tiles ion
    asset), and reports load duration and FPS statistics back to the window.
    """

    def __init__(self):
        super().__init__()
        self._logger = logging.getLogger(__name__)
        # UI window and message-bus subscriptions; all created in on_startup()
        # and torn down in on_shutdown().
        self._performance_window: Optional[CesiumPerformanceWindow] = None
        self._view_new_york_city_subscription: Optional[carb.events.ISubscription] = None
        self._view_paris_subscription: Optional[carb.events.ISubscription] = None
        self._view_grand_canyon_subscription: Optional[carb.events.ISubscription] = None
        self._view_tour_subscription: Optional[carb.events.ISubscription] = None
        self._view_new_york_city_google_subscription: Optional[carb.events.ISubscription] = None
        self._view_paris_google_subscription: Optional[carb.events.ISubscription] = None
        self._view_grand_canyon_google_subscription: Optional[carb.events.ISubscription] = None
        self._view_tour_google_subscription: Optional[carb.events.ISubscription] = None
        self._stop_subscription: Optional[carb.events.ISubscription] = None
        self._on_stage_subscription: Optional[carb.events.ISubscription] = None
        self._update_frame_subscription: Optional[carb.events.ISubscription] = None
        self._tileset_loaded_subscription: Optional[carb.events.ISubscription] = None
        # State for the currently running benchmark (if any).
        self._camera_path: Optional[str] = None
        self._tileset_path: Optional[str] = None
        self._active: bool = False
        self._start_time: float = 0.0
        self._fps_sampler: FpsSampler = FpsSampler()

    def on_startup(self):
        """Acquire the native interface, build the UI, and wire up all event subscriptions."""
        global _cesium_omniverse_interface
        _cesium_omniverse_interface = acquire_cesium_omniverse_interface()
        self._setup_menus()
        self._show_and_dock_startup_windows()
        # One subscription per scenario button in the performance window.
        bus = app.get_app().get_message_bus_event_stream()
        view_new_york_city_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY")
        self._view_new_york_city_subscription = bus.create_subscription_to_pop_by_type(
            view_new_york_city_event, self._view_new_york_city
        )
        view_paris_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS")
        self._view_paris_subscription = bus.create_subscription_to_pop_by_type(view_paris_event, self._view_paris)
        view_grand_canyon_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON")
        self._view_grand_canyon_subscription = bus.create_subscription_to_pop_by_type(
            view_grand_canyon_event, self._view_grand_canyon
        )
        view_tour_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR")
        self._view_tour_subscription = bus.create_subscription_to_pop_by_type(view_tour_event, self._view_tour)
        view_new_york_city_google_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY_GOOGLE")
        self._view_new_york_city_google_subscription = bus.create_subscription_to_pop_by_type(
            view_new_york_city_google_event, self._view_new_york_city_google
        )
        view_paris_google_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS_GOOGLE")
        self._view_paris_google_subscription = bus.create_subscription_to_pop_by_type(
            view_paris_google_event, self._view_paris_google
        )
        view_grand_canyon_google_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON_GOOGLE")
        self._view_grand_canyon_google_subscription = bus.create_subscription_to_pop_by_type(
            view_grand_canyon_google_event, self._view_grand_canyon_google
        )
        view_tour_google_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR_GOOGLE")
        self._view_tour_google_subscription = bus.create_subscription_to_pop_by_type(
            view_tour_google_event, self._view_tour_google
        )
        stop_event = carb.events.type_from_string("cesium.performance.STOP")
        self._stop_subscription = bus.create_subscription_to_pop_by_type(stop_event, self._on_stop)
        # If a stage is already open we may have missed the open event; handle it now.
        usd_context = omni.usd.get_context()
        if usd_context.get_stage_state() == omni.usd.StageState.OPENED:
            self._on_stage_opened()
        self._on_stage_subscription = usd_context.get_stage_event_stream().create_subscription_to_pop(
            self._on_stage_event, name="cesium.performance.ON_STAGE_EVENT"
        )
        update_stream = app.get_app().get_update_event_stream()
        self._update_frame_subscription = update_stream.create_subscription_to_pop(
            self._on_update_frame, name="cesium.performance.ON_UPDATE_FRAME"
        )

    def on_shutdown(self):
        """Tear down the scene, every subscription, the sampler, the window, and the native interface."""
        self._clear_scene()
        if self._view_new_york_city_subscription is not None:
            self._view_new_york_city_subscription.unsubscribe()
            self._view_new_york_city_subscription = None
        if self._view_paris_subscription is not None:
            self._view_paris_subscription.unsubscribe()
            self._view_paris_subscription = None
        if self._view_grand_canyon_subscription is not None:
            self._view_grand_canyon_subscription.unsubscribe()
            self._view_grand_canyon_subscription = None
        if self._view_tour_subscription is not None:
            self._view_tour_subscription.unsubscribe()
            self._view_tour_subscription = None
        if self._view_new_york_city_google_subscription is not None:
            self._view_new_york_city_google_subscription.unsubscribe()
            self._view_new_york_city_google_subscription = None
        if self._view_paris_google_subscription is not None:
            self._view_paris_google_subscription.unsubscribe()
            self._view_paris_google_subscription = None
        if self._view_grand_canyon_google_subscription is not None:
            self._view_grand_canyon_google_subscription.unsubscribe()
            self._view_grand_canyon_google_subscription = None
        if self._view_tour_google_subscription is not None:
            self._view_tour_google_subscription.unsubscribe()
            self._view_tour_google_subscription = None
        if self._stop_subscription is not None:
            self._stop_subscription.unsubscribe()
            self._stop_subscription = None
        if self._on_stage_subscription is not None:
            self._on_stage_subscription.unsubscribe()
            self._on_stage_subscription = None
        if self._update_frame_subscription is not None:
            self._update_frame_subscription.unsubscribe()
            self._update_frame_subscription = None
        self._fps_sampler.destroy()
        self._destroy_performance_window()
        release_cesium_omniverse_interface(_cesium_omniverse_interface)

    def _setup_menus(self):
        """Register the performance window with the workspace and editor menu."""
        ui.Workspace.set_show_window_fn(
            CesiumPerformanceWindow.WINDOW_NAME, partial(self._show_performance_window, None)
        )
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.add_item(
                CesiumPerformanceWindow.MENU_PATH, self._show_performance_window, toggle=True, value=True
            )

    def _show_and_dock_startup_windows(self):
        """Show the performance window and dock it next to the Property panel."""
        ui.Workspace.show_window(CesiumPerformanceWindow.WINDOW_NAME)
        asyncio.ensure_future(dock_window_async(self._performance_window, target="Property"))

    def _destroy_performance_window(self):
        """Destroy and drop the performance window, if one exists."""
        if self._performance_window is not None:
            self._performance_window.destroy()
            self._performance_window = None

    async def _destroy_window_async(self, path):
        """Destroy the window identified by menu ``path`` after a one-frame delay."""
        # Wait one frame, this is due to the one frame defer in Window::_moveToMainOSWindow()
        await wait_n_frames(1)
        if path is CesiumPerformanceWindow.MENU_PATH:
            self._destroy_performance_window()

    def _visibility_changed_fn(self, path, visible):
        """Keep the editor-menu checkmark in sync and destroy the window when hidden."""
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.set_value(path, visible)
        if not visible:
            asyncio.ensure_future(self._destroy_window_async(path))

    def _show_performance_window(self, _menu, value):
        """Menu callback: create the window when toggled on, hide it when toggled off."""
        if value:
            self._performance_window = CesiumPerformanceWindow(_cesium_omniverse_interface, width=300, height=400)
            self._performance_window.set_visibility_changed_fn(
                partial(self._visibility_changed_fn, CesiumPerformanceWindow.MENU_PATH)
            )
        elif self._performance_window is not None:
            self._performance_window.visible = False

    def _on_update_frame(self, _e: carb.events.IEvent):
        """Per-frame tick: while a benchmark is active, refresh duration and FPS readouts."""
        if self._active is True:
            duration = self._get_duration()
            self._update_duration_ui(duration)
            self._update_fps_ui(self._fps_sampler.get_fps())

    def _on_stage_event(self, _e: carb.events.IEvent):
        """React to stage lifecycle events; only OPENED is of interest."""
        usd_context = omni.usd.get_context()
        if usd_context.get_stage_state() == omni.usd.StageState.OPENED:
            self._on_stage_opened()

    def _on_stage_opened(self):
        """Ensure the benchmark camera exists on the newly opened stage."""
        self._camera_path = self._create_camera(CESIUM_CAMERA_PATH)

    @staticmethod
    def _create_tileset_ion(path: str, asset_id: int, access_token: str) -> str:
        """Create a Cesium ion tileset prim at (or near) ``path``; returns the prim path used."""
        stage = omni.usd.get_context().get_stage()
        # get_stage_next_free_path avoids clobbering an existing prim at `path`.
        tileset_path = omni.usd.get_stage_next_free_path(stage, path, False)
        tileset = CesiumTileset.Define(stage, tileset_path)
        assert tileset.GetPrim().IsValid()
        tileset.GetIonAssetIdAttr().Set(asset_id)
        tileset.GetIonAccessTokenAttr().Set(access_token)
        tileset.GetSourceTypeAttr().Set(CesiumTokens.ion)
        return tileset_path

    @staticmethod
    def _create_tileset_google() -> str:
        """Create a tileset prim for the Google 3D Tiles ion asset; returns its prim path."""
        stage = omni.usd.get_context().get_stage()
        tileset_path = omni.usd.get_stage_next_free_path(stage, "/Google_3D_Tiles", False)
        tileset = CesiumTileset.Define(stage, tileset_path)
        tileset.GetIonAssetIdAttr().Set(GOOGLE_3D_TILES_ION_ID)
        tileset.GetIonAccessTokenAttr().Set(ION_ACCESS_TOKEN)
        tileset.GetSourceTypeAttr().Set(CesiumTokens.ion)
        return tileset_path

    @staticmethod
    def _create_raster_overlay_ion(path: str, asset_id: int, access_token: str) -> str:
        """Create an ion raster overlay prim and bind it to its parent tileset.

        ``path`` must be a child path of an existing tileset prim (see
        _get_raster_overlay_path). Returns the overlay prim path used.
        """
        stage = omni.usd.get_context().get_stage()
        raster_overlay_path = omni.usd.get_stage_next_free_path(stage, path, False)
        raster_overlay = CesiumIonRasterOverlay.Define(stage, raster_overlay_path)
        assert raster_overlay.GetPrim().IsValid()
        parent = raster_overlay.GetPrim().GetParent()
        assert parent.IsA(CesiumTileset)
        tileset_prim = CesiumTileset.Get(stage, parent.GetPath())
        tileset_prim.GetRasterOverlayBindingRel().AddTarget(raster_overlay_path)
        raster_overlay.GetIonAssetIdAttr().Set(asset_id)
        raster_overlay.GetIonAccessTokenAttr().Set(access_token)
        return raster_overlay_path

    @staticmethod
    def _create_camera(path: str) -> str:
        """Create the benchmark camera at ``path`` unless a prim already exists there."""
        stage = omni.usd.get_context().get_stage()
        if stage.GetPrimAtPath(path):
            return path
        camera = UsdGeom.Camera.Define(stage, path)
        assert camera.GetPrim().IsValid()
        # Wide clipping range so distant globe geometry is not culled.
        camera.GetClippingRangeAttr().Set(Gf.Vec2f(1.0, 100000000.0))
        return path

    @staticmethod
    def _get_raster_overlay_path(tileset_path: str, raster_overlay_name: str) -> str:
        """Return the child prim path for an overlay named ``raster_overlay_name`` under the tileset."""
        return Sdf.Path(tileset_path).AppendPath(raster_overlay_name).pathString

    @staticmethod
    def _set_georeference(longitude: float, latitude: float, height: float):
        """Point the stage's CesiumGeoreference origin at the given WGS84 coordinates."""
        stage = omni.usd.get_context().get_stage()
        cesium_georeference = CesiumGeoreference.Get(stage, CESIUM_GEOREFERENCE_PRIM_PATH)
        assert cesium_georeference.GetPrim().IsValid()
        cesium_georeference.GetGeoreferenceOriginLongitudeAttr().Set(longitude)
        cesium_georeference.GetGeoreferenceOriginLatitudeAttr().Set(latitude)
        cesium_georeference.GetGeoreferenceOriginHeightAttr().Set(height)

    def _set_camera(
        self,
        translate: Gf.Vec3d,
        rotate: Gf.Vec3f,
        focal_length: float,
        horizontal_aperture: float,
        vertical_aperture: float,
    ):
        """Make the benchmark camera active in the viewport and pose/configure it."""
        stage = omni.usd.get_context().get_stage()
        viewport = get_active_viewport()
        viewport.set_active_camera(self._camera_path)
        camera = UsdGeom.Camera.Get(stage, self._camera_path)
        camera.GetFocalLengthAttr().Set(focal_length)
        camera.GetHorizontalApertureAttr().Set(horizontal_aperture)
        camera.GetVerticalApertureAttr().Set(vertical_aperture)
        xform_common_api = UsdGeom.XformCommonAPI(camera.GetPrim())
        xform_common_api.SetTranslate(translate)
        xform_common_api.SetRotate(rotate, UsdGeom.XformCommonAPI.RotationOrderYXZ)

    @staticmethod
    def _get_tileset(path: str) -> CesiumTileset:
        """Fetch the tileset prim at ``path``; asserts that it exists."""
        stage = omni.usd.get_context().get_stage()
        tileset = CesiumTileset.Get(stage, path)
        assert tileset.GetPrim().IsValid()
        return tileset

    @staticmethod
    def _get_cesium_data() -> CesiumData:
        """Fetch the global CesiumData prim; asserts that it exists."""
        stage = omni.usd.get_context().get_stage()
        cesium_data = CesiumData.Get(stage, CESIUM_DATA_PRIM_PATH)
        assert cesium_data.GetPrim().IsValid()
        return cesium_data

    @staticmethod
    def _remove_prim(path: str):
        """Remove the prim at ``path`` from the stage."""
        stage = omni.usd.get_context().get_stage()
        stage.RemovePrim(path)

    def _setup_location_new_york_city(self):
        """Georeference and camera pose for the New York City scenario."""
        self._set_georeference(-74.0060, 40.7128, 50.0)
        self._set_camera(
            Gf.Vec3d(-176516.8372437113, 33877.019622553846, 197777.19771945066),
            Gf.Vec3f(-7.9392824, -37.71652, -6.0970836),
            18.14756,
            20.955,
            15.2908,
        )

    def _setup_location_paris(self):
        """Georeference and camera pose for the Paris scenario."""
        self._set_georeference(2.3522, 48.8566, 100.0)
        self._set_camera(
            Gf.Vec3d(-285275.1368718885, 780.3607448845705, 35392.91845506678),
            Gf.Vec3f(0.46399376, 65.245544, -1.0061567),
            18.14756,
            20.955,
            15.2908,
        )

    def _setup_location_grand_canyon(self):
        """Georeference and camera pose for the Grand Canyon scenario."""
        self._set_georeference(-112.3535, 36.2679, 2100.0)
        self._set_camera(
            Gf.Vec3d(-339866.7567928189, 27967.440239271935, -59650.894693908194),
            Gf.Vec3f(5.532731, -129.35608, -6.704948),
            18.14756,
            20.955,
            15.2908,
        )

    def _view_new_york_city(self, _e: carb.events.IEvent):
        """Scenario: New York City over Cesium World Terrain + Bing imagery."""
        self._logger.warning("View New York City")
        self._clear_scene()
        self._setup_location_new_york_city()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        self._load_tileset(tileset_path, self._tileset_loaded)

    def _view_paris(self, _e: carb.events.IEvent):
        """Scenario: Paris over Cesium World Terrain + Bing imagery."""
        self._logger.warning("View Paris")
        self._clear_scene()
        self._setup_location_paris()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        self._load_tileset(tileset_path, self._tileset_loaded)

    def _view_grand_canyon(self, _e: carb.events.IEvent):
        """Scenario: Grand Canyon over Cesium World Terrain + Bing imagery."""
        self._logger.warning("View Grand Canyon")
        self._clear_scene()
        self._setup_location_grand_canyon()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        self._load_tileset(tileset_path, self._tileset_loaded)

    def _view_tour(self, _e: carb.events.IEvent):
        """Scenario: visit all three locations in sequence over Cesium World Terrain."""
        self._logger.warning("View Tour")
        self._clear_scene()
        tileset_path = self._create_tileset_ion("/Cesium_World_Terrain", 1, ION_ACCESS_TOKEN)
        self._create_raster_overlay_ion(
            self._get_raster_overlay_path(tileset_path, "Bing_Maps_Aerial_Imagery"),
            2,
            ION_ACCESS_TOKEN,
        )
        def tour_stop_0():
            self._setup_location_new_york_city()
        def tour_stop_1():
            self._setup_location_paris()
        def tour_stop_2():
            self._setup_location_grand_canyon()
        # The Tour drives the stops; _tileset_loaded fires after the final stop.
        tour = Tour(self, [tour_stop_0, tour_stop_1, tour_stop_2], self._tileset_loaded)
        self._load_tileset(tileset_path, tour.tour_stop_loaded)

    def _view_new_york_city_google(self, _e: carb.events.IEvent):
        """Scenario: New York City with the Google 3D Tiles asset."""
        self._logger.warning("View New York City Google")
        self._clear_scene()
        self._setup_location_new_york_city()
        tileset_path = self._create_tileset_google()
        self._load_tileset(tileset_path, self._tileset_loaded)

    def _view_paris_google(self, _e: carb.events.IEvent):
        """Scenario: Paris with the Google 3D Tiles asset."""
        self._logger.warning("View Paris Google")
        self._clear_scene()
        self._setup_location_paris()
        tileset_path = self._create_tileset_google()
        self._load_tileset(tileset_path, self._tileset_loaded)

    def _view_grand_canyon_google(self, _e: carb.events.IEvent):
        """Scenario: Grand Canyon with the Google 3D Tiles asset."""
        self._logger.warning("View Grand Canyon Google")
        self._clear_scene()
        self._setup_location_grand_canyon()
        tileset_path = self._create_tileset_google()
        self._load_tileset(tileset_path, self._tileset_loaded)

    def _view_tour_google(self, _e: carb.events.IEvent):
        """Scenario: visit all three locations in sequence with the Google 3D Tiles asset."""
        self._logger.warning("View Tour Google")
        self._clear_scene()
        tileset_path = self._create_tileset_google()
        def tour_stop_0():
            self._setup_location_new_york_city()
        def tour_stop_1():
            self._setup_location_paris()
        def tour_stop_2():
            self._setup_location_grand_canyon()
        tour = Tour(self, [tour_stop_0, tour_stop_1, tour_stop_2], self._tileset_loaded)
        self._load_tileset(tileset_path, tour.tour_stop_loaded)

    def _load_tileset(self, tileset_path: str, tileset_loaded: Callable):
        """Apply the window's options to the tileset, then start timing and FPS sampling.

        ``tileset_loaded`` is subscribed to the TILESET_LOADED bus event and
        fires when loading completes.
        """
        tileset = self._get_tileset(tileset_path)
        cesium_data = self._get_cesium_data()
        assert self._performance_window is not None
        bus = app.get_app().get_message_bus_event_stream()
        tileset_loaded_event = carb.events.type_from_string("cesium.omniverse.TILESET_LOADED")
        self._tileset_loaded_subscription = bus.create_subscription_to_pop_by_type(
            tileset_loaded_event, tileset_loaded
        )
        # Copy the user-selected options from the window onto the prims.
        random_colors = self._performance_window.get_random_colors()
        forbid_holes = self._performance_window.get_forbid_holes()
        frustum_culling = self._performance_window.get_frustum_culling()
        main_thread_loading_time_limit = self._performance_window.get_main_thread_loading_time_limit_model()
        cesium_data.GetDebugRandomColorsAttr().Set(random_colors)
        tileset.GetForbidHolesAttr().Set(forbid_holes)
        tileset.GetEnableFrustumCullingAttr().Set(frustum_culling)
        tileset.GetMainThreadLoadingTimeLimitAttr().Set(main_thread_loading_time_limit)
        self._tileset_path = tileset_path
        self._active = True
        self._start_time = time.time()
        self._fps_sampler.start()

    def _tileset_loaded(self, _e: carb.events.IEvent):
        """TILESET_LOADED handler: stop timing and publish the final stats to the UI."""
        self._stop()
        duration = self._get_duration()
        self._update_duration_ui(duration)
        self._update_fps_mean_ui(self._fps_sampler.get_mean())
        self._update_fps_median_ui(self._fps_sampler.get_median())
        self._update_fps_low_ui(self._fps_sampler.get_low())
        self._update_fps_high_ui(self._fps_sampler.get_high())
        self._logger.warning("Loaded in {} seconds".format(duration))

    def _get_duration(self) -> float:
        """Return seconds elapsed since the current benchmark started."""
        current_time = time.time()
        duration = current_time - self._start_time
        return duration

    def _update_duration_ui(self, value: float):
        """Push the elapsed-time stat to the window, if it exists."""
        if self._performance_window is not None:
            self._performance_window.set_duration(value)

    def _update_fps_ui(self, value: float):
        """Push the live FPS stat to the window, if it exists."""
        if self._performance_window is not None:
            self._performance_window.set_fps(value)

    def _update_fps_mean_ui(self, value: float):
        """Push the mean-FPS stat to the window, if it exists."""
        if self._performance_window is not None:
            self._performance_window.set_fps_mean(value)

    def _update_fps_median_ui(self, value: float):
        """Push the median-FPS stat to the window, if it exists."""
        if self._performance_window is not None:
            self._performance_window.set_fps_median(value)

    def _update_fps_low_ui(self, value: float):
        """Push the low-FPS stat to the window, if it exists."""
        if self._performance_window is not None:
            self._performance_window.set_fps_low(value)

    def _update_fps_high_ui(self, value: float):
        """Push the high-FPS stat to the window, if it exists."""
        if self._performance_window is not None:
            self._performance_window.set_fps_high(value)

    def _clear_scene(self):
        """Stop any running benchmark, zero the stats UI, and delete the current tileset."""
        self._stop()
        self._update_duration_ui(0.0)
        self._update_fps_ui(0.0)
        self._update_fps_mean_ui(0.0)
        self._update_fps_median_ui(0.0)
        self._update_fps_low_ui(0.0)
        self._update_fps_high_ui(0.0)
        if self._tileset_path is not None:
            self._remove_prim(self._tileset_path)

    def _on_stop(self, _e: carb.events.IEvent):
        """Bus handler for the performance window's Stop button."""
        self._stop()

    def _stop(self):
        """Deactivate the benchmark, stop FPS sampling, and drop the loaded-event subscription."""
        self._active = False
        self._fps_sampler.stop()
        if self._tileset_loaded_subscription is not None:
            self._tileset_loaded_subscription.unsubscribe()
            self._tileset_loaded_subscription = None
class Tour:
    """Steps through a list of location "stops", timing how long each takes to load."""

    def __init__(self, ext: CesiumPerformanceExtension, tour_stops: List[Callable], tour_complete: Callable):
        self._ext: CesiumPerformanceExtension = ext
        self._tour_stops: List[Callable] = tour_stops
        self._tour_complete: Callable = tour_complete
        self._current_stop: int = 0
        self._duration: float = 0.0
        assert len(tour_stops) > 0
        # Kick off the first stop immediately.
        tour_stops[0]()

    def tour_stop_loaded(self, _e: carb.events.IEvent):
        """Callback fired when the current stop's tileset finishes loading."""
        total_elapsed = self._ext._get_duration()
        stop_elapsed = total_elapsed - self._duration
        self._duration = total_elapsed
        self._ext._logger.warning("Tour stop {} loaded in {} seconds".format(self._current_stop, stop_elapsed))
        last_stop_index = len(self._tour_stops) - 1
        if self._current_stop < last_stop_index:
            # Advance to, and trigger, the next stop.
            self._current_stop += 1
            self._tour_stops[self._current_stop]()
        else:
            self._tour_complete(_e)
| 23,749 | Python | 39.667808 | 212 | 0.652659 |
CesiumGS/cesium-omniverse/apps/exts/cesium.performance.app/cesium/performance/app/performance_window.py | import logging
import carb.events
import omni.kit.app as app
import omni.ui as ui
from cesium.omniverse.bindings import ICesiumOmniverseInterface
# --- UI label strings for the performance window ---
# Option checkboxes / fields.
RANDOM_COLORS_TEXT = "Random colors"
FORBID_HOLES_TEXT = "Forbid holes"
FRUSTUM_CULLING_TEXT = "Frustum culling"
TRACING_ENABLED_TEXT = "Tracing enabled"
MAIN_THREAD_LOADING_TIME_LIMIT_TEXT = "Main thread loading time limit (ms)"
# Scenario buttons (each posts a matching cesium.performance.VIEW_* event).
NEW_YORK_CITY_TEXT = "New York City"
PARIS_TEXT = "Paris"
GRAND_CANYON_TEXT = "Grand Canyon"
TOUR_TEXT = "Tour"
NEW_YORK_CITY_GOOGLE_TEXT = "New York City (Google)"
PARIS_GOOGLE_TEXT = "Paris (Google)"
GRAND_CANYON_GOOGLE_TEXT = "Grand Canyon (Google)"
TOUR_GOOGLE_TEXT = "Tour (Google)"
# Read-only stats fields.
DURATION_TEXT = "Duration (seconds)"
FPS_TEXT = "FPS"
FPS_MEAN_TEXT = "FPS (mean)"
FPS_MEDIAN_TEXT = "FPS (median)"
FPS_LOW_TEXT = "FPS (low)"
FPS_HIGH_TEXT = "FPS (high)"
class CesiumPerformanceWindow(ui.Window):
    """omni.ui window for driving Cesium performance test scenarios.

    The window exposes tileset options, scenario buttons that broadcast events
    on the app message bus (handled by the performance extension elsewhere),
    and read-only FPS statistics that the harness pushes back via the set_*
    methods.
    """

    WINDOW_NAME = "Cesium Performance Testing"
    MENU_PATH = f"Window/Cesium/{WINDOW_NAME}"

    def __init__(self, cesium_omniverse_interface: ICesiumOmniverseInterface, **kwargs):
        super().__init__(CesiumPerformanceWindow.WINDOW_NAME, **kwargs)
        self._cesium_omniverse_interface = cesium_omniverse_interface
        self._logger = logging.getLogger(__name__)

        # Option models; read by the harness through the get_* accessors below.
        self._random_colors_checkbox_model = ui.SimpleBoolModel(False)
        self._forbid_holes_checkbox_model = ui.SimpleBoolModel(False)
        self._frustum_culling_checkbox_model = ui.SimpleBoolModel(True)
        self._main_thread_loading_time_limit_model = ui.SimpleFloatModel(0.0)

        # Stats models; written by the harness through the set_* mutators below.
        self._duration_model = ui.SimpleFloatModel(0.0)
        self._fps_model = ui.SimpleFloatModel(0.0)
        self._fps_mean_model = ui.SimpleFloatModel(0.0)
        self._fps_median_model = ui.SimpleFloatModel(0.0)
        self._fps_low_model = ui.SimpleFloatModel(0.0)
        self._fps_high_model = ui.SimpleFloatModel(0.0)

        # Defer UI construction until the frame is first drawn.
        self.frame.set_build_fn(self._build_fn)

    def destroy(self) -> None:
        super().destroy()

    def _build_fn(self):
        """Build the window contents: options, scenario buttons, stats, stop."""
        with ui.VStack(spacing=10):
            with ui.VStack(spacing=4):
                with ui.HStack(height=16):
                    ui.Label("Options", height=0)
                    ui.Spacer()

                # Checkbox rows share a layout, so they are built in a loop.
                for label, model in [
                    (RANDOM_COLORS_TEXT, self._random_colors_checkbox_model),
                    (FORBID_HOLES_TEXT, self._forbid_holes_checkbox_model),
                    (FRUSTUM_CULLING_TEXT, self._frustum_culling_checkbox_model),
                ]:
                    with ui.HStack(height=0):
                        ui.Label(label, height=0)
                        ui.CheckBox(model)

                with ui.HStack(height=0):
                    ui.Label(MAIN_THREAD_LOADING_TIME_LIMIT_TEXT, height=0)
                    ui.StringField(self._main_thread_loading_time_limit_model)

                # Read-only indicator: tracing availability is a compile-time
                # option of the native library, not a runtime toggle.
                with ui.HStack(height=16):
                    tracing_label = ui.Label(TRACING_ENABLED_TEXT, height=0)
                    tracing_label.set_tooltip(
                        "Enabled when the project is configured with -D CESIUM_OMNI_ENABLE_TRACING=ON"
                    )
                    enabled_string = "ON" if self._cesium_omniverse_interface.is_tracing_enabled() else "OFF"
                    ui.Label(enabled_string, height=0)

            with ui.VStack(spacing=0):
                ui.Label("Scenarios", height=16)

                # One button per scenario; each callback just pushes a bus event.
                for label, callback in [
                    (NEW_YORK_CITY_TEXT, self._view_new_york_city),
                    (PARIS_TEXT, self._view_paris),
                    (GRAND_CANYON_TEXT, self._view_grand_canyon),
                    (TOUR_TEXT, self._view_tour),
                    (NEW_YORK_CITY_GOOGLE_TEXT, self._view_new_york_city_google),
                    (PARIS_GOOGLE_TEXT, self._view_paris_google),
                    (GRAND_CANYON_GOOGLE_TEXT, self._view_grand_canyon_google),
                    (TOUR_GOOGLE_TEXT, self._view_tour_google),
                ]:
                    ui.Button(label, height=20, clicked_fn=callback)

            with ui.VStack(spacing=4):
                with ui.HStack(height=16):
                    ui.Label("Stats", height=0)
                    ui.Spacer()

                # Read-only stat fields, one row per metric.
                for label, model in [
                    (DURATION_TEXT, self._duration_model),
                    (FPS_TEXT, self._fps_model),
                    (FPS_MEAN_TEXT, self._fps_mean_model),
                    (FPS_MEDIAN_TEXT, self._fps_median_model),
                    (FPS_LOW_TEXT, self._fps_low_model),
                    (FPS_HIGH_TEXT, self._fps_high_model),
                ]:
                    with ui.HStack(height=0):
                        ui.Label(label, height=0)
                        ui.StringField(model=model, height=0, read_only=True)

            with ui.VStack(spacing=0):
                ui.Button("Stop", height=16, clicked_fn=self._stop)

    # Each _view_* handler broadcasts a scenario event on the app message bus;
    # the performance extension subscribes to these event types.
    def _view_new_york_city(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_new_york_city_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY")
        bus.push(view_new_york_city_event)

    def _view_paris(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_paris_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS")
        bus.push(view_paris_event)

    def _view_grand_canyon(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_grand_canyon_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON")
        bus.push(view_grand_canyon_event)

    def _view_tour(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_tour_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR")
        bus.push(view_tour_event)

    def _view_new_york_city_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_new_york_city_google_event = carb.events.type_from_string("cesium.performance.VIEW_NEW_YORK_CITY_GOOGLE")
        bus.push(view_new_york_city_google_event)

    def _view_paris_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_paris_google_event = carb.events.type_from_string("cesium.performance.VIEW_PARIS_GOOGLE")
        bus.push(view_paris_google_event)

    def _view_grand_canyon_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_grand_canyon_google_event = carb.events.type_from_string("cesium.performance.VIEW_GRAND_CANYON_GOOGLE")
        bus.push(view_grand_canyon_google_event)

    def _view_tour_google(self):
        bus = app.get_app().get_message_bus_event_stream()
        view_tour_google_event = carb.events.type_from_string("cesium.performance.VIEW_TOUR_GOOGLE")
        bus.push(view_tour_google_event)

    def _stop(self):
        """Broadcast the STOP event to end the running scenario."""
        bus = app.get_app().get_message_bus_event_stream()
        stop_event = carb.events.type_from_string("cesium.performance.STOP")
        bus.push(stop_event)

    # Accessors used by the performance harness when starting a scenario.
    def get_random_colors(self) -> bool:
        return self._random_colors_checkbox_model.get_value_as_bool()

    def get_forbid_holes(self) -> bool:
        return self._forbid_holes_checkbox_model.get_value_as_bool()

    def get_frustum_culling(self) -> bool:
        return self._frustum_culling_checkbox_model.get_value_as_bool()

    def get_main_thread_loading_time_limit_model(self) -> float:
        return self._main_thread_loading_time_limit_model.get_value_as_float()

    # Mutators used by the harness to publish measured results into the UI.
    def set_duration(self, value: float):
        self._duration_model.set_value(value)

    def set_fps(self, value: float):
        self._fps_model.set_value(value)

    def set_fps_mean(self, value: float):
        self._fps_mean_model.set_value(value)

    def set_fps_median(self, value: float):
        self._fps_median_model.set_value(value)

    def set_fps_low(self, value: float):
        self._fps_low_model.set_value(value)

    def set_fps_high(self, value: float):
        self._fps_high_model.set_value(value)
| 8,013 | Python | 40.097436 | 118 | 0.606639 |
CesiumGS/cesium-omniverse/apps/exts/cesium.performance.app/cesium/performance/app/fps_sampler.py | import array
import time
import carb.events
import omni.kit.app as app
import statistics
from omni.kit.viewport.utility import get_active_viewport
FREQUENCY_IN_SECONDS: float = 0.025
class FpsSampler:
    """Samples the active viewport's FPS on every app update while started,
    then aggregates mean/median/low/high when stopped."""

    def __init__(
        self,
    ):
        self._active: bool = False
        self._last_time: float = 0.0
        self._fps = 0.0

        # Raw float samples gathered while active; cleared by stop().
        self._samples = array.array("f")

        # Aggregate statistics, computed by stop().
        self._mean: float = 0.0
        self._median: float = 0.0
        self._low: float = 0.0
        self._high: float = 0.0

        self._viewport = get_active_viewport()

        update_stream = app.get_app().get_update_event_stream()
        self._update_frame_subscription = update_stream.create_subscription_to_pop(
            self._on_update_frame, name="cesium.performance.ON_UPDATE_FRAME"
        )

    def __del__(self):
        self.destroy()

    def destroy(self):
        """Unsubscribe from frame updates (idempotent)."""
        subscription = self._update_frame_subscription
        if subscription is not None:
            subscription.unsubscribe()
            self._update_frame_subscription = None

    def start(self):
        """Begin collecting samples from now on."""
        self._last_time = time.time()
        self._active = True

    def stop(self):
        """Stop sampling and fold the collected samples into the aggregates."""
        self._active = False
        samples = self._samples
        if len(samples) > 0:
            self._mean = statistics.mean(samples)
            self._median = statistics.median(samples)
            self._low = min(samples)
            self._high = max(samples)
            self._samples = array.array("f")

    # Aggregates are only valid once sampling has stopped.
    def get_mean(self):
        assert not self._active
        return self._mean

    def get_median(self):
        assert not self._active
        return self._median

    def get_low(self):
        assert not self._active
        return self._low

    def get_high(self):
        assert not self._active
        return self._high

    def get_fps(self):
        """Most recent instantaneous sample; only valid while active."""
        assert self._active
        return self._fps

    def _on_update_frame(self, _e: carb.events.IEvent):
        if not self._active:
            return

        now = time.time()
        # Throttle sampling so a very high frame rate does not flood the buffer.
        if now - self._last_time > FREQUENCY_IN_SECONDS:
            latest = self._viewport.fps
            self._samples.append(latest)
            self._last_time = now
            self._fps = latest
| 2,243 | Python | 25.093023 | 83 | 0.576014 |
CesiumGS/cesium-omniverse/scripts/copy_to_exts.py | """
This file is a post build step run by cmake that copies over the CHANGES.md and related resources to
the exts/docs folder for packaging.
"""
import re
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import List
@dataclass
class PathPair:
    """
    Represents a source and relative destination pair.

    :arg source: The source path for the file.
    :arg relative_destination: The relative destination for the file; an empty
        string means "copy directly into the destination directory".
    """

    source: Path
    relative_destination: str = ""
def find_resources(path: Path) -> List[PathPair]:
    """
    Finds all resources within a file and returns them as a list of PathPairs. The search is done using a regular
    expression looking for all links that contain the substring "docs/resources".

    NOTE: This **only** works with relative paths. Absolute paths in the file read will fail.

    :param path: The file to search.
    :return: A list containing PathPairs of all resources found in the file.
    """
    regex = re.compile(r"!\[.*]\((.*docs/(resources.*?))\)")
    root_path = path.parent.resolve()
    resources: List[PathPair] = []

    # Read as UTF-8 explicitly: the default locale encoding is platform
    # dependent and markdown files routinely contain non-ASCII characters.
    with open(path.resolve(), "r", encoding="utf-8") as f:
        # Iterate the file object lazily instead of materializing readlines().
        for line in f:
            match = regex.search(line)
            if match is not None:
                source = root_path.joinpath(match.group(1))
                relative_destination = match.group(2)
                resources.append(PathPair(source, relative_destination))

    return resources
def copy_to_destination(pair: PathPair, destination: Path) -> None:
    """
    Copies the file based on the path and relative destination contained in the pair.

    NOTE: This uses shutils so if you're on a version of Python older than 3.8 this will be slow.

    :param pair: The PathPair for the copy operation.
    :param destination: The path of the destination directory.
    """
    if pair.relative_destination != "":
        true_destination = destination.joinpath(pair.relative_destination)
    else:
        true_destination = destination

    # A directory destination needs the source filename appended so the copy
    # targets a concrete file path.
    if true_destination.is_dir():
        true_destination = true_destination.joinpath(pair.source.name)

    true_destination.parent.mkdir(parents=True, exist_ok=True)
    shutil.copyfile(pair.source, true_destination)
def main() -> int:
    """Copy CHANGES.md plus every resource it references into the extension doc folder.

    Returns 0 on success, 1 on any failure (the error is printed).
    """
    project_root = Path(__file__).parent.parent
    destination = project_root.joinpath("exts/cesium.omniverse/doc")
    changes_path = project_root.joinpath("CHANGES.md")

    try:
        pairs: List[PathPair] = [PathPair(changes_path)]
        pairs.extend(find_resources(changes_path))

        for pair in pairs:
            copy_to_destination(pair, destination)
    except Exception as e:
        print(e)
        return 1

    return 0
exit(main())
| 2,987 | Python | 29.489796 | 116 | 0.665216 |
CesiumGS/cesium-omniverse/scripts/vscode_build.py | #!/usr/bin/env python3
import sys
import subprocess
import multiprocessing
import os
import platform
import shutil
try:
import pty
except Exception:
pass
import webbrowser
from typing import List, NamedTuple
def is_windows():
    """Return True when running on a Windows host."""
    system_name = platform.system()
    return system_name == "Windows"
def is_linux():
    """Return True when running on a Linux host."""
    system_name = platform.system()
    return system_name == "Linux"
def process(cmd: List[str]):
    """Run *cmd*, echoing its output, and abort the script if it fails.

    On Linux a pseudo-terminal is used so child processes keep emitting
    terminal colors; elsewhere output is streamed through a pipe.
    """
    print("Run: " + " ".join(cmd))
    if is_linux():
        # Using pty instead of subprocess to get terminal colors
        result = pty.spawn(cmd)
        # NOTE(review): pty.spawn returns a waitpid-style status word rather
        # than a plain return code; nonzero still means failure, but the value
        # handed to sys.exit is the encoded status — confirm this is intended.
        if result != 0:
            sys.exit(result)
    else:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
        for line in p.stdout:
            print(line, end="")
        p.communicate()
        if p.returncode != 0:
            raise subprocess.CalledProcessError(p.returncode, p.args)
def open_browser(html: str):
    """Open the given HTML file path in a new browser tab."""
    absolute = os.path.realpath(html)
    url = "file://{}".format(absolute)
    webbrowser.open(url, new=2)
class Args(NamedTuple):
    """Parsed positional/flag arguments for one build-task invocation."""

    task: str  # task name, e.g. "build", "lint", "clean"
    build_folder: str  # derived via get_build_folder_name()
    build_type: str  # CMake build type, e.g. "Release" or "Debug"
    compiler_name: str  # C compiler name, or "default"
    tracing: bool  # adds -D CESIUM_OMNI_ENABLE_TRACING=ON
    verbose: bool  # passes --verbose to `cmake --build`
    kit_debug: bool  # adds -D CESIUM_OMNI_USE_NVIDIA_DEBUG_LIBRARIES=ON
    parallel: bool  # passes --parallel with (cores - 1)
    build_only: bool  # skip the configure step in build()
def c_compiler_to_cpp_compiler(compiler_name: str):
    """Map a C compiler name (e.g. "gcc-12", "clang") to its C++ counterpart."""
    return compiler_name.replace("gcc", "g++").replace("clang", "clang++")
def get_cmake_configure_command(args: Args):
    """Assemble the `cmake -B ...` configure command for the given Args."""
    cmd = ["cmake", "-B", args.build_folder]

    # Release is the default build type, so no need to pass CMAKE_BUILD_TYPE
    if args.build_type != "Release":
        cmd += ["-D", "CMAKE_BUILD_TYPE={}".format(args.build_type)]

    if args.tracing:
        cmd += ["-D", "CESIUM_OMNI_ENABLE_TRACING=ON"]

    if args.kit_debug:
        cmd += ["-D", "CESIUM_OMNI_USE_NVIDIA_DEBUG_LIBRARIES=ON"]

    # Windows always builds with MSVC through Ninja Multi-Config.
    if is_windows():
        cmd += ["-G", "Ninja Multi-Config", "-D", "CMAKE_C_COMPILER=cl", "-D", "CMAKE_CXX_COMPILER=cl"]
        return cmd

    if args.compiler_name == "default":
        return cmd

    c_compiler = args.compiler_name
    cpp_compiler = c_compiler_to_cpp_compiler(c_compiler)
    cmd += ["-D", "CMAKE_C_COMPILER={}".format(c_compiler)]
    cmd += ["-D", "CMAKE_CXX_COMPILER={}".format(cpp_compiler)]
    return cmd
def get_cmake_build_command(args: Args, target: str):
    """Assemble the `cmake --build ...` command, optionally for one target."""
    cmd = ["cmake", "--build", args.build_folder]

    if is_windows():
        cmd += ["--config", args.build_type]

    if target:
        cmd += ["--target", target]

    if args.verbose:
        cmd.append("--verbose")

    if args.parallel:
        # use every core except one so that computer doesn't go too slow
        worker_count = max(1, multiprocessing.cpu_count() - 1)
        cmd += ["--parallel", str(worker_count)]

    return cmd
def get_cmake_install_command(args: Args):
    """Assemble the `cmake --install ...` command."""
    cmd = ["cmake", "--install", args.build_folder]
    if is_windows():
        cmd += ["--config", args.build_type]
    return cmd
def configure(args: Args):
    """Run the CMake configure step."""
    process(get_cmake_configure_command(args))
def build(args: Args):
    """Configure (unless --build-only), build, then install the kit extension."""
    if not args.build_only:
        process(get_cmake_configure_command(args))
    process(get_cmake_build_command(args, None))
    process(get_cmake_install_command(args))
def coverage(args: Args):
    """Generate the coverage report and open it in a browser (not on Windows)."""
    if is_windows():
        print("Coverage is not supported for Windows")
        return

    process(get_cmake_configure_command(args))
    process(get_cmake_build_command(args, "generate-coverage"))
    open_browser("{}/coverage/index.html".format(args.build_folder))
def documentation(args: Args):
    """Generate the doxygen documentation and open it in a browser."""
    process(get_cmake_configure_command(args))
    process(get_cmake_build_command(args, "generate-documentation"))
    open_browser("{}/docs/html/index.html".format(args.build_folder))
def install(args: Args):
    """Configure, then run the CMake `install` target."""
    process(get_cmake_configure_command(args))
    process(get_cmake_build_command(args, "install"))
def clean(args: Args):
    """Delete the build folder if present (isdir implies existence)."""
    build_path = args.build_folder
    if os.path.isdir(build_path):
        shutil.rmtree(build_path)
def format(args: Args):  # noqa: A001 - name matches the CLI task; intentionally shadows the builtin
    """Run the clang-format fix-all target over the tree."""
    process(get_cmake_build_command(args, "clang-format-fix-all"))
def lint(args: Args):
    """Run the clang-tidy target."""
    process(get_cmake_build_command(args, "clang-tidy"))
def lint_fix(args: Args):
    """Run the clang-tidy target with automatic fixes."""
    process(get_cmake_build_command(args, "clang-tidy-fix"))
def dependency_graph(args: Args):
    """Render the Conan dependency graph to HTML and open it in a browser."""
    process(get_cmake_configure_command(args))

    conan_packages_path = os.path.join(args.build_folder, "Conan_Packages")
    dependency_html = os.path.join(args.build_folder, "dependency_graph.html")
    process(["conan", "info", args.build_folder, "-if", conan_packages_path, "--graph", dependency_html])

    open_browser(dependency_html)
def get_build_folder_name(build_type: str, compiler_name: str):
    """Derive the build folder name, e.g. "build-debug-clang" on Linux."""
    # Windows uses a single multi-config folder regardless of type/compiler.
    if is_windows():
        return "build"

    name = "build"
    if build_type != "Release":
        name += "-{}".format(build_type.lower())
    if compiler_name != "default":
        name += "-{}".format(compiler_name)
    return name
def get_bin_folder_name(build_type: str, compiler_name: str):
    """Return the binary output folder for the given build configuration."""
    build_folder_name = get_build_folder_name(build_type, compiler_name)
    if is_windows():
        return "{}/bin/{}".format(build_folder_name, build_type)
    return "{}/bin".format(build_folder_name)
def main(av: List[str]):
    """Entry point: parse positional task arguments and dispatch to the task.

    Expected argv layout: task [build_type [compiler [flag [--no-parallel]]]],
    where flag is one of --tracing / --verbose / --kit-debug / --build-only.
    Unknown tasks are silently ignored (matches the original behavior).
    """
    print(av)

    task = av[0]
    build_type = av[1] if len(av) >= 2 else "Release"
    compiler_name = av[2] if len(av) >= 3 else "default"
    build_folder = get_build_folder_name(build_type, compiler_name)
    tracing = len(av) >= 4 and av[3] == "--tracing"
    verbose = len(av) >= 4 and av[3] == "--verbose"
    kit_debug = len(av) >= 4 and av[3] == "--kit-debug"
    parallel = not (len(av) >= 5 and av[4] == "--no-parallel")
    build_only = len(av) >= 4 and av[3] == "--build-only"

    args = Args(task, build_folder, build_type, compiler_name, tracing, verbose, kit_debug, parallel, build_only)

    # Dispatch table instead of the if/elif ladder.
    handlers = {
        "configure": configure,
        "build": build,
        "clean": clean,
        "coverage": coverage,
        "documentation": documentation,
        "install": install,
        "format": format,
        "lint": lint,
        "lint-fix": lint_fix,
        "dependency-graph": dependency_graph,
    }
    handler = handlers.get(task)
    if handler is not None:
        handler(args)
# Script entry point: forward CLI args (minus argv[0]) to main() and report
# any failure as a bare message plus exit code 1 (traceback intentionally hidden
# to keep VS Code task output short).
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except Exception as e:
        print(e)
        exit(1)
| 7,274 | Python | 25.845018 | 113 | 0.63129 |
CesiumGS/cesium-omniverse/scripts/clang_tidy.py | #!/usr/bin/env python3
import argparse
import sys
import shutil
from utils import utils
from typing import List
def parse_args(av: List[str]):
    """Parse known options; unrecognized args are passed through to clang-tidy."""
    parser = argparse.ArgumentParser(description="Run / check clang-tidy on staged cpp files.")
    parser.add_argument(
        "--clang-tidy-executable",
        action="store",
        required=False,
        help="Specific clang-tidy binary to use.",
    )
    return parser.parse_known_args(av)
def main(av: List[str]):
    """Run clang-tidy over the staged C++ files, forwarding extra args to it.

    Does nothing (with a notice) when no C++ files are staged.
    """
    known_args, clang_tidy_args = parse_args(av)

    clang_tidy_executable = known_args.clang_tidy_executable
    if not clang_tidy_executable:
        clang_tidy_executable = shutil.which("clang-tidy")

    # Fix: get_project_root() was previously called twice; once is enough.
    project_root = utils.get_project_root()
    candidate_files = [
        f.as_posix() for f in utils.get_staged_git_files(project_root) if f.suffix in utils.CPP_EXTENSIONS
    ]

    cmd = [clang_tidy_executable] + clang_tidy_args + candidate_files

    if len(candidate_files) > 0:
        print("Running clang-tidy")
        utils.run_command_and_echo_on_error(cmd)
    else:
        print("Skipping clang-tidy (no cpp files staged)")
# Entry point: pass through CLI args (argv[0] excluded).
if __name__ == "__main__":
    main(sys.argv[1:])
| 1,188 | Python | 27.999999 | 108 | 0.672559 |
CesiumGS/cesium-omniverse/scripts/update_certs.py | #! /usr/bin/python3
"""
Intended to be run as a standalone script.
This script updates the certificates used for any requests that use the core
Context class. While some certs are available on the system, they may not be
consistent or updated. This ensures all certs are uniform and up to date
see: https://github.com/CesiumGS/cesium-omniverse/issues/306
"""
import requests
import sys
import os
def main():
    """Fetch the latest CA bundle from curl.se and write it to the given path.

    Expects the destination file path as the first CLI argument.
    Returns 0 on success, -1 on bad usage or download failure.
    """
    # --- establish source/destination for certs ---
    if len(sys.argv) < 2:
        print("must provide a filepath for the updated certs")
        return -1

    CERT_URL = "https://curl.se/ca/cacert.pem"
    CERT_FILE_PATH = sys.argv[1]

    # --- ensure directory structure exists ----
    os.makedirs(os.path.dirname(CERT_FILE_PATH), exist_ok=True)

    # --- fetch and write the cert file ----
    # A timeout keeps the build from hanging forever if curl.se is unreachable.
    req = requests.get(CERT_URL, timeout=60)
    if req.status_code != 200:
        print(f"failed to fetch certificates from {CERT_URL}")
        return -1

    # explicit encoding is required for windows
    with open(CERT_FILE_PATH, "w", encoding="utf-8") as f:
        f.write(req.text)

    return 0
# Exit with main()'s return code when run as a script.
if __name__ == "__main__":
    sys.exit(main())
| 1,144 | Python | 24.444444 | 76 | 0.659091 |
CesiumGS/cesium-omniverse/scripts/generate_third_party_license_json.py | #!/usr/bin/env python3
import json
import os
import shlex
import subprocess
import argparse
from typing import List
import sys
from pathlib import Path
def main(argv: List[str]):
    """Create ThirdParty.json by merging Conan package info with ThirdParty.extra.json.

    Raises RuntimeError if the conan command fails or an override entry in
    ThirdParty.extra.json has no matching Conan library.
    """
    args = parse_args(argv)
    project_dir = args.project_dir
    build_dir = args.build_dir
    libraries_to_skip = args.skip.split(',')

    cmd = "conan info {} -if {} -j".format(build_dir,
                                           os.path.join(build_dir, 'Conan_Packages'))
    cmd = shlex.split(cmd, posix=(os.name == 'posix'))
    try:
        output = subprocess.check_output(cmd).decode('utf-8')
        # The JSON payload is taken from the second output line — presumably
        # conan prints a banner/warning first; confirm against the conan version.
        json_output = output.split(os.linesep, 2)[1]
        third_party_json = json.loads(json_output)
    except subprocess.CalledProcessError as error:
        cmd_string = ' '.join(error.cmd)
        raise RuntimeError('Conan command \'{}\' failed with error {}. Third party JSON creation aborted.'
                           .format(cmd_string, error.returncode))

    third_party_json = generate_conan_third_party_json(
        third_party_json, libraries_to_skip)
    third_party_extra_json = json.loads(Path(project_dir).joinpath(
        'ThirdParty.extra.json').read_text())

    # Handle ThirdParty.extra.json: entries flagged "override" replace the
    # matching Conan record (merged, extra fields win); others are appended.
    for element in third_party_extra_json:
        if 'override' in element:
            found_match = False
            for match in third_party_json:
                if match['name'] == element['name']:
                    found_match = True
                    break

            if found_match:
                del element['override']
                third_party_json.remove(match)
                combined = {**match, **element}
                third_party_json.append(combined)
            else:
                # Bug fix: `element` is a dict, so use item access — the old
                # attribute access (`element.name`) raised AttributeError here.
                raise RuntimeError('Could not find library to override: \'{}\'. Third party JSON creation aborted.'
                                   .format(element['name']))
        else:
            third_party_json.append(element)

    third_party_json.sort(key=lambda obj: obj['name'].lower())

    third_party_json_path = os.path.join(project_dir, 'ThirdParty.json')

    with open(third_party_json_path, 'w', newline='\n') as json_file:
        json.dump(third_party_json, json_file, indent=4)
        json_file.write('\n')
def parse_args(argv: List[str]):
    """Parse CLI arguments.

    Fixes: `required` now uses the boolean True (it was the string 'true',
    which only worked by truthiness), and --skip defaults to '' so that
    main()'s `args.skip.split(',')` cannot raise AttributeError when the
    flag is omitted.
    """
    parser = argparse.ArgumentParser(
        description='Create third party license json from Conan info and ThirdParty.extra.json.'
    )
    parser.add_argument('--project-dir',
                        help='The project directory.',
                        action='store',
                        required=True
                        )
    parser.add_argument('--build-dir',
                        help='The CMake build directory. From CMake variable PROJECT_BINARY_DIR.',
                        action='store',
                        required=True
                        )
    parser.add_argument('--skip',
                        help='Comma separated list of libraries to skip.',
                        action='store',
                        default=''
                        )
    return parser.parse_args(argv)
def generate_conan_third_party_json(third_party_json, libraries_to_skip):
    """Convert `conan info -j` package entries into ThirdParty.json records.

    Skips the synthetic `conanfile.txt` entry and any library whose name
    appears in *libraries_to_skip*.
    """
    result = []
    for library in third_party_json:
        # skip the `conanfile` object, as its NOT a real third party library
        if library['reference'] == 'conanfile.txt':
            continue

        display_name = library['display_name']
        url = library['homepage']

        # A single license entry may hold several comma-separated license names.
        licenses = []
        for license_entry in library['license']:
            licenses.extend(license_entry.split(', '))

        name_and_version = display_name.split('/')
        name = name_and_version[0]
        version = name_and_version[1]

        # skip libraries that aren't included in the executable
        if name in libraries_to_skip:
            continue

        result.append({
            'name': name,
            'license': licenses,
            'version': version,
            'url': url
        })

    return result
# Entry point: pass through CLI args (argv[0] excluded).
if __name__ == '__main__':
    main(sys.argv[1:])
| 4,043 | Python | 32.421487 | 115 | 0.558001 |
CesiumGS/cesium-omniverse/scripts/clang_format.py | #!/usr/bin/env python3
import argparse
import sys
import subprocess
import shutil
import shlex
from utils import utils
from pathlib import Path
from typing import List
def clang_format_on_path(clang_format_binary: str, absolute_path: Path) -> str:
    """Return clang-format's output for *absolute_path* without modifying the file."""
    command = shlex.split(
        "{} -style=file {}".format(shlex.quote(clang_format_binary), shlex.quote(str(absolute_path)))
    )
    formatted = subprocess.check_output(command)
    return formatted.decode("utf-8", "replace")
def clang_format_in_place(clang_format_binary: str, absolute_path: Path):
    """Reformat *absolute_path* on disk with clang-format (destructive)."""
    command = shlex.split(
        "{} -style=file -i {}".format(shlex.quote(clang_format_binary), shlex.quote(str(absolute_path)))
    )
    subprocess.check_output(command)
def parse_args(av: List[str]):
    """Parse CLI args: fix/check mode, all/staged scope, and source directories."""
    parser = argparse.ArgumentParser(description="Run / check clang-formatting.")
    parser.add_argument(
        "--clang-format-executable",
        action="store",
        required=False,
        help="Specific clang-format binary to use.",
    )
    parser.add_argument(
        "--source-directories",
        nargs="+",
        required=True,
        help='Directories (relative to project root) to recursively scan for cpp files (e.g "src", "include"...)',
    )

    # Exactly one of --fix / --check must be chosen.
    run_type = parser.add_mutually_exclusive_group(required=True)
    run_type.add_argument(
        "--fix", action="store_true", help="Apply clang-format formatting to source in-place (destructive)"
    )
    run_type.add_argument("--check", action="store_true", help="Check if source matches clang-format rules")

    # Exactly one of --all / --staged must be chosen.
    scope_type = parser.add_mutually_exclusive_group(required=True)
    scope_type.add_argument("--all", action="store_true", help="Process all valid source files.")
    scope_type.add_argument("--staged", action="store_true", help="Process only staged source files.")

    return parser.parse_args(av)
def main(av: List[str]):
    """Check or fix clang-format compliance for project C++ sources.

    Exit codes: 0 on success (or nothing to do / fix mode), otherwise the
    number of files that failed the --check diff.
    """
    if not shutil.which("git"):
        raise RuntimeError("Could not find git in path")

    project_root_directory = utils.get_project_root()
    args = parse_args(av)

    # Use user provided clang_format binary if provided
    clang_format_binary = args.clang_format_executable
    if clang_format_binary:
        clang_format_binary = shutil.which(clang_format_binary)
    if not clang_format_binary:
        clang_format_binary = shutil.which("clang-format")
    if not clang_format_binary:
        raise RuntimeError("Could not find clang-format in system path")

    mode = "all" if args.all else "staged"
    source_directories = args.source_directories

    # Generate list of source_files to check / fix.
    source_files: List[utils.SourceFile] = utils.get_source_files(source_directories, args.all)
    failed_files: List[utils.FailedFile] = []

    # Fix or check formatting for each file
    for src in source_files:
        absolute_path = project_root_directory.joinpath(src.relative_path)
        if args.check:
            # For staged files, diff against the git-index content (what would
            # be committed), not the working-tree copy.
            old_text = (
                absolute_path.read_text(encoding="utf-8")
                if not src.staged
                else utils.get_staged_file_text(src.relative_path)
            )
            new_text = clang_format_on_path(clang_format_binary, absolute_path)
            diff = utils.unidiff_output(old_text, new_text)
            if diff != "":
                failed_files.append(utils.FailedFile(src.relative_path, diff))
        else:
            clang_format_in_place(clang_format_binary, absolute_path)

    if len(source_files) == 0:
        print("clang-format ({} files): No files found, nothing to do.".format(mode))
        sys.exit(0)

    if args.fix:
        print("Ran clang-format -style=file -i on {} files".format(mode))
        sys.exit(0)

    if len(failed_files) == 0:
        print("clang-format ({} files) passes.".format(mode))
        sys.exit(0)

    print("clang-format ({} files) failed on the following files: ".format(mode))
    for failure in failed_files:
        print("{}".format(failure.relative_path))
        print(failure.diff)

    # Nonzero exit signals CI failure; count doubles as a severity hint.
    sys.exit(len(failed_files))
# Entry point: pass through CLI args (argv[0] excluded).
if __name__ == "__main__":
    main(sys.argv[1:])
| 4,036 | Python | 36.379629 | 114 | 0.655104 |
CesiumGS/cesium-omniverse/scripts/copy_from_dir.py | import sys
from pathlib import Path
from shutil import copy2
# Broken out for formatting reasons, since tabs within HEREDOCs will be output.
# (Triple-quoted literals preserve leading whitespace verbatim, so this stays
# unindented at module level.)
usage_message = """Invalid arguments.
Usage: copy_from_dir.py <glob-pattern> <source-dir-path> <destination-dir-path>
Please fix your command and try again.
"""
def main():
    """Copy files matching a glob pattern from a source dir to a destination dir.

    Expects argv: <glob-pattern> <source-dir-path> <destination-dir-path>.
    Returns 0 on success, 1 on bad usage.
    """
    if len(sys.argv) < 4:
        print(usage_message)
        return 1

    glob_pattern: str = sys.argv[1]
    source_dir = Path(sys.argv[2]).resolve()
    dest_dir = Path(sys.argv[3]).resolve()

    print(f'Performing file copy with glob pattern "{glob_pattern}"')
    print(f"\tSource: {source_dir}")
    print(f"\tDestination: {dest_dir}\n")

    # Path.glob on a resolved base already yields absolute paths anchored at
    # source_dir, so the previous `source_dir / f` re-join was a no-op and has
    # been removed.
    for source_path in source_dir.glob(glob_pattern):
        copy2(source_path, dest_dir, follow_symlinks=True)
        print(f"Copied {source_path}")

    return 0
# Exit with main()'s return code when run as a script.
if __name__ == "__main__":
    sys.exit(main())
| 948 | Python | 22.724999 | 83 | 0.647679 |
CesiumGS/cesium-omniverse/scripts/utils/utils.py | #!/usr/bin/env python3
import subprocess
import shlex
import os
import glob
import sys
from pathlib import Path
from typing import List, NamedTuple, Set
import difflib
CPP_EXTENSIONS = [".cpp", ".h", ".cxx", ".hxx", ".hpp", ".cc", ".inl"]
def get_project_root() -> Path:
    """Return the repository root via `git rev-parse`; raise when not in a repo."""
    cmd = shlex.split('git rev-parse --show-toplevel')
    try:
        top_level = subprocess.check_output(cmd).strip().decode('utf-8', 'replace')
    except subprocess.CalledProcessError:
        raise RuntimeError('command must be ran inside .git repo')
    return Path(top_level)
def get_staged_git_files(project_root: Path) -> List[Path]:
    """Return absolute paths of files staged in git (added/copied/modified/renamed/type-changed)."""
    cmd = shlex.split("git diff --cached --name-only --diff-filter=ACMRT")
    output = subprocess.check_output(cmd).decode('utf-8')
    return [project_root.joinpath(rel) for rel in output.splitlines()]
def get_cmake_build_directory(project_root: Path):
    """Locate the CMake build directory (parent of CMakeCache.txt) under *project_root*.

    Raises RuntimeError when no CMakeCache.txt exists, i.e. CMake has not been
    configured yet.
    """
    pattern = project_root.joinpath("**/CMakeCache.txt").as_posix()
    matches = glob.glob(pattern, recursive=True)
    if not matches:
        raise RuntimeError(
            "Could not find CMakeCache.txt in {}. Generate CMake configuration first.".format(project_root))
    # glob returns the cache file path; its parent is the build directory.
    return os.path.realpath(os.path.join(project_root, matches[0], ".."))
def run_cmake_target(cmake_build_directory, target):
    """Invoke `cmake --build <dir> --target <target>`, exiting the script on failure."""
    quoted_dir = shlex.quote(cmake_build_directory)
    run_command_and_echo_on_error(shlex.split("cmake --build {} --target {}".format(quoted_dir, target)))
def run_command_and_echo_on_error(cmd: List[str]):
    """Run *cmd* quietly; on failure print its captured output and exit(1)."""
    try:
        subprocess.run(cmd, check=True, stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as error:
        print("Command \"{}\" failed:".format(' '.join(cmd)))
        print(error.output.decode('utf-8'))
        sys.exit(1)
class SourceFile(NamedTuple):
    """A project source file plus whether it is currently staged in git."""

    relative_path: Path  # path relative to the project root
    staged: bool  # True when the file is in the git index
class FailedFile(NamedTuple):
    """A file that failed the clang-format check, with its unified diff."""

    relative_path: Path  # path relative to the project root
    diff: str  # unified diff between current content and formatted content
def get_source_files(source_directories: List[str], modeIsAll: bool) -> List[SourceFile]:
    """Collect C++ files under *source_directories*; keep all of them when
    *modeIsAll* is True, otherwise only the git-staged ones."""
    project_root = get_project_root()
    staged_rel_paths = get_staged_rel_paths()

    collected = []
    for directory in source_directories:
        for extension in CPP_EXTENSIONS:
            pattern = os.path.join(
                project_root, directory, "**/*" + extension)
            for absolute_path in glob.glob(pattern, recursive=True):
                relative = Path(absolute_path).relative_to(project_root)
                collected.append(SourceFile(relative, relative in staged_rel_paths))

    return [source_file for source_file in collected if source_file.staged or modeIsAll]
def get_staged_rel_paths() -> Set[Path]:
    """Return the set of project-relative Paths of files staged in git.

    (Return annotation corrected: the function builds Path objects, not str.)
    """
    cmd = shlex.split("git diff --cached --name-only --diff-filter=ACMRT")
    staged_rel_paths = subprocess.check_output(cmd)
    staged_rel_paths = staged_rel_paths.decode('utf-8', 'replace')
    return set([Path(path) for path in staged_rel_paths.splitlines()])
def get_staged_file_text(relative_path: Path) -> str:
    """Return the staged (git-index) content of *relative_path* via `git show`."""
    show_cmd = shlex.split("git show :{}".format(shlex.quote(str(relative_path.as_posix()))))
    return subprocess.check_output(show_cmd).decode('utf-8', 'replace')
# Optional colorized diff output: colorama is not a hard dependency, so fall
# back to plain text when it cannot be imported.
COLOR_SUPPORT = False

try:
    import colorama
    colorama.init()
    COLOR_SUPPORT = True

    def color_diff(diff):
        """Yield unified-diff lines wrapped in ANSI colors (+green, -red, ^blue)."""
        for line in diff:
            if line.startswith('+'):
                yield colorama.Fore.GREEN + line + colorama.Fore.RESET
            elif line.startswith('-'):
                yield colorama.Fore.RED + line + colorama.Fore.RESET
            elif line.startswith('^'):
                yield colorama.Fore.BLUE + line + colorama.Fore.RESET
            else:
                yield line

except ImportError:
    pass
def unidiff_output(expected: str, actual: str):
    """Return a unified diff between *expected* and *actual* (colorized when possible)."""
    # keepends=True replaces the opaque positional `splitlines(1)`.
    expected_lines = expected.splitlines(keepends=True)
    actual_lines = actual.splitlines(keepends=True)
    diff = difflib.unified_diff(expected_lines, actual_lines)
    if COLOR_SUPPORT:
        diff = color_diff(diff)
    return ''.join(diff)
| 4,139 | Python | 31.857143 | 96 | 0.639526 |
CesiumGS/cesium-omniverse/extern/nvidia/scripts/install.py | import os
import packmanapi
import sys
# Packman dependency manifests, resolved relative to this script's repo dir.
REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
KIT_SDK_FILE = os.path.join(REPO_ROOT, "deps/kit-sdk.packman.xml")
TARGET_DEPS_FILE = os.path.join(REPO_ROOT, "deps/target-deps.packman.xml")
# Pull the Kit SDK and target dependencies for the requested platform.
if __name__ == "__main__":
    # NOTE(review): argv[2] implies the script is invoked with an extra leading
    # argument before the platform (e.g. `script <something> <platform>`) —
    # confirm against the calling build configuration.
    platform = sys.argv[2]
    packmanapi.pull(KIT_SDK_FILE, platform=platform)
    packmanapi.pull(TARGET_DEPS_FILE, platform=platform)
| 424 | Python | 29.357141 | 75 | 0.698113 |
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/extension.py | import omni.ext
import omni.ui as ui
from omni.kit.viewport.utility import get_active_viewport_window
from .viewport_scene import ViewportSceneInfo
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class AimingToolExtension(omni.ext.IExt):
    """Extension entry point: attaches the IES viewer overlay to the active viewport."""

    def __init__(self) -> None:
        super().__init__()
        self.viewport_scene = None

    def on_startup(self, ext_id):
        """Create the viewport scene overlay when the extension is enabled."""
        window = get_active_viewport_window()
        self.viewport_scene = ViewportSceneInfo(window, ext_id)

    def on_shutdown(self):
        """Tear down the overlay when the extension is disabled."""
        if self.viewport_scene:
            self.viewport_scene.destroy()
            self.viewport_scene = None
| 1,023 | Python | 38.384614 | 119 | 0.7087 |
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/viewport_scene.py | from omni.ui import scene as sc
import omni.ui as ui
from .object_info_manipulator import ObjInfoManipulator
from .object_info_model import ObjInfoModel
class ViewportSceneInfo():
    """The Object Info Manipulator, placed into a Viewport"""

    def __init__(self, viewport_window, ext_id) -> None:
        self.scene_view = None
        self.viewport_window = viewport_window

        # Each extension draws into its own uniquely-identified viewport frame.
        with self.viewport_window.get_frame(ext_id):
            # A default SceneView already carries a camera model.
            self.scene_view = sc.SceneView()

            with self.scene_view.scene:
                ObjInfoManipulator(model=ObjInfoModel())

            # Register so the SceneView receives projection and view updates.
            self.viewport_window.viewport_api.add_scene_view(self.scene_view)

    def __del__(self):
        self.destroy()

    def destroy(self):
        """Clear the scene, unregister from the viewport, and drop references."""
        if self.scene_view:
            # Empty the SceneView of any elements it may have.
            self.scene_view.scene.clear()

            # Un-register the SceneView from Viewport updates.
            if self.viewport_window:
                self.viewport_window.viewport_api.remove_scene_view(self.scene_view)

        # Drop our references to these objects.
        self.viewport_window = None
        self.scene_view = None
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/object_info_model.py | from pxr import Tf
from pxr import Gf
from pxr import Usd
from pxr import UsdGeom
from pxr import UsdShade
from pxr import UsdLux
from .IESReader import IESLight
import os.path
import numpy as np
from omni.ui import scene as sc
import omni.usd
def _flatten_matrix(matrix: Gf.Matrix4d):
    """Flatten a 4x4 Gf.Matrix4d into a 16-element list in row-major order."""
    return [matrix[row][col] for row in range(4) for col in range(4)]
class ObjInfoModel(sc.AbstractManipulatorModel):
    """
    Manipulator model tracking the currently selected IES lights.

    It exposes two queryable items: the world transformation of each selected
    light and the sampled IES profile points parsed from each light's
    'shaping:ies:file' attribute.
    """
    class MatrixItem(sc.AbstractManipulatorItem):
        """
        The Model Item represents the transformation. It doesn't contain anything
        because we take the transformation directly from USD when requesting.
        """
        # Row-major 4x4 identity matrix used as the default value.
        identity = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
        def __init__(self):
            super().__init__()
            self.value = self.identity.copy()
    class PositionItem(sc.AbstractManipulatorItem):
        """
        The Model Item represents a position. It doesn't contain anything
        because we take the position directly from USD when requesting.
        """
        def __init__(self) -> None:
            super().__init__()
            self.value = [0, 0, 0]
    class PositionList(sc.AbstractManipulatorItem):
        """
        The Model Item represents a list of positions. It doesn't contain anything
        because we take the positions directly from USD when requesting.
        """
        def __init__(self) -> None:
            super().__init__()
            self.value = [[0,0,0]]
    def __init__(self) -> None:
        super().__init__()
        # Current selected prim list (only lights carrying an IES file).
        self.prim = []
        self.current_path = []
        self.material_name = []
        # Tf.Notice listener; registered only while IES lights are selected.
        self.stage_listener = None
        # Sampling resolution (degrees) used by the manipulator to slice the profile.
        self.horizontal_step = 15
        self.vertical_step = 15
        self.IESPoints = [ObjInfoModel.PositionList()]
        self.transformation = [ObjInfoModel.MatrixItem()]
        # Save the UsdContext name (we currently only work with a single Context)
        self.usd_context = self._get_context()
        # Track selection changes
        self.events = self.usd_context.get_stage_event_stream()
        self.stage_event_delegate = self.events.create_subscription_to_pop(
            self.on_stage_event, name="Object Info Selection Update"
        )
    @property
    def _time(self):
        # Evaluate USD attributes at the default time code.
        return Usd.TimeCode.Default()
    def _get_context(self) -> Usd.Stage:
        # Get the UsdContext we are attached to
        # NOTE(review): despite the annotation this returns omni.usd's context
        # object (used below for get_stage()/get_selection()), not a Usd.Stage.
        return omni.usd.get_context()
    #Update when light are transformed or modified
    def notice_changed(self, notice: Usd.Notice, stage: Usd.Stage) -> None:
        """Called by Tf.Notice. Used when the current selected object changes in some way."""
        light_path = self.current_path
        if not light_path:
            return
        for p in notice.GetChangedInfoOnlyPaths():
            prim_path = p.GetPrimPath().pathString
            #check if prim_path not in selected list but parent of prim_path is in selected list
            if prim_path not in light_path:
                # A transform change on an ancestor also moves the selected light.
                if (True in (light_path_item.startswith(prim_path) for light_path_item in light_path)):
                    if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):
                        self._item_changed(self.transformation[0])
                continue
            if UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name):
                self._item_changed(self.transformation[0])
            #if light property changed such as ies file changed, update profile
            self._item_changed(self.transformation[0])
    def _get_transform(self, time: Usd.TimeCode):
        """Returns world transform of currently selected object"""
        if not self.prim:
            return [ObjInfoModel.MatrixItem.identity.copy()]
        # Compute matrix from world-transform in USD
        #get transform matrix for each selected light
        # NOTE(review): wrapping in UsdGeom.BasisCurves works because
        # ComputeLocalToWorldTransform comes from the Xformable base schema.
        world_xform_list = [UsdGeom.BasisCurves(prim).ComputeLocalToWorldTransform(time) for prim in self.prim]
        # Flatten Gf.Matrix4d to list
        return [_flatten_matrix(world_xform) for world_xform in world_xform_list]
    def get_item(self, identifier):
        # Look up a model item list by its string identifier.
        if identifier == "IESPoints":
            return self.IESPoints
        if identifier == "transformation":
            return self.transformation
    def get_as_floats(self, item):
        # Resolve an item to its current values, read straight from USD.
        if item == self.transformation:
            return self._get_transform(self._time)
        if item == self.IESPoints:
            return self.get_points(self._time)
        return []
    #get ies points for each selected light
    def get_points(self, time: Usd.TimeCode):
        """Parse the 'shaping:ies:file' of every selected light into point arrays."""
        if not self.prim:
            return [[0,0,0]]
        allIESPoint = []
        for prim in self.prim:
            iesFile = prim.GetAttribute('shaping:ies:file').Get()
            # The asset-path value prints as '@path@'; strip the at-signs.
            allIESPoint.append(IESLight(str(iesFile).replace('@', '')).points)
        return allIESPoint
    def on_stage_event(self, event):
        """Called by stage_event_stream. We only care about selection changes."""
        if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            self.current_path = []
            self.prim = []
            primList = []
            primPathList = []
            usd_context = self._get_context()
            stage = usd_context.get_stage()
            if not stage:
                return
            prim_paths = usd_context.get_selection().get_selected_prim_paths()
            if not prim_paths:
                # This turns off the manipulator when everything is deselected
                self._item_changed(self.transformation[0])
                return
            #select light with ies file applied.
            # Distant lights are excluded even when they carry an IES file.
            lightCount = 0
            for i in prim_paths:
                prim = stage.GetPrimAtPath(i)
                if(UsdLux.Light(prim) and prim.GetAttribute('shaping:ies:file').Get() and not (prim.IsA(UsdLux.DistantLight))):
                    primList.append(prim)
                    primPathList.append(i)
                    lightCount = lightCount +1
            if(lightCount==0):
                # No eligible lights selected: stop listening and hide the display.
                if self.stage_listener:
                    self.stage_listener.Revoke()
                    self.stage_listener = None
                self._item_changed(self.transformation[0])
                return
            if not self.stage_listener:
                # Listen for object changes so transforms/IES edits refresh the display.
                self.stage_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self.notice_changed, stage)
            self.prim = primList
            self.current_path = primPathList
            # Position is changed because new selected object has a different position
            self._item_changed(self.transformation[0])
    def destroy(self):
        # Drop the event stream and stop receiving stage events.
        self.events = None
        self.stage_event_delegate.unsubscribe()
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/object_info_manipulator.py | from __future__ import division
from omni.ui import scene as sc
from omni.ui import color as cl
import omni.ui as ui
import numpy as np
class ObjInfoManipulator(sc.Manipulator):
    """Draws each selected IES light's photometric web as yellow curves
    in the viewport, transformed by the light's world matrix.
    """
    def on_build(self):
        """Called when the model is changed and rebuilds the whole manipulator"""
        if not self.model:
            return
        # One point array per selected light (see ObjInfoModel.get_points).
        IESPoints = self.model.get_as_floats(self.model.IESPoints)
        # Number of horizontal slices: one per horizontal_step over 0..360 inclusive.
        numHorizontal = int((360/self.model.horizontal_step)+1)
        primCount = 0
        for transformation in self.model.get_as_floats(self.model.transformation):
            # Root transform places the web at the light's world position.
            self.__root_xf = sc.Transform(transformation)
            with self.__root_xf:
                self._x_xform = sc.Transform()
                with self._x_xform:
                    self._shape_xform = sc.Transform()
                    IESPoint = IESPoints[primCount]
                    # Points are stored slice-by-slice; recover the slice length.
                    numVertical = int(len(IESPoint)/numHorizontal)
                    for index in range(0,numHorizontal):
                        # One curve per vertical slice of the photometric web.
                        points = IESPoint[index*numVertical:(index+1)*numVertical]
                        if(len(points)>0):
                            sc.Curve(points.tolist(), thicknesses=[1.0], colors=[cl.yellow],tessellation=9)
            primCount = primCount+1
    def on_model_updated(self, item):
        # Regenerate the manipulator
        self.invalidate()
XiaomingY/omni-ies-viewer/exts/IESViewer/IESViewer/IESReader.py | import numpy as np
import re
import math
#import matplotlib.pyplot as plt
from scipy import interpolate
import os.path
#from mpl_toolkits.mplot3d.axes3d import Axes3D
import omni.ext
import omni.ui as ui
omni.kit.pipapi.install("astropy")
from astropy.coordinates import spherical_to_cartesian
DEFAULT_HORIZONTAL_STEP = 15
DEFAULT_VERTICAL_STEP = 15
IES_MaxLength = 80
class IESLight():
    """Parses an IES (LM-63) photometric file into a displayable point cloud.

    On success the instance exposes:
        file:   path of the parsed IES file.
        width, length, radius: luminaire dimensions in meters (0 when absent).
        points: Nx3 cartesian samples of the intensity web, resampled every
                DEFAULT_HORIZONTAL_STEP / DEFAULT_VERTICAL_STEP degrees and
                scaled so no point is farther than IES_MaxLength.

    When *iesFile* is falsy or missing on disk, __init__ returns early and the
    instance stays unpopulated (original best-effort behaviour).
    """
    def __init__(self, iesFile):
        if iesFile and os.path.exists(iesFile):
            self.file = iesFile
        else:
            # No usable file: leave the instance empty.
            return
        self.width = 0
        self.length = 0
        self.radius = 0
        all_values = self.readIESfile(self.file)
        verticalAngles, horizontalAngles, intensities, self.width, self.length, self.radius = self.getIESproperties(all_values)
        # Expand symmetric measurements to the full 0-360 horizontal range.
        horizontalAnglesMirrored, intensityMirrored = self.mirrorAngles(horizontalAngles, intensities)
        horizontalResampled = np.arange(0, 361, DEFAULT_HORIZONTAL_STEP)
        verticalResampled = np.arange(0, verticalAngles[-1] + 1, DEFAULT_VERTICAL_STEP)
        resampledIntensity = self.interpolateIESValues(np.array(horizontalAnglesMirrored), np.array(verticalAngles), horizontalResampled, verticalResampled, intensityMirrored)
        self.points = self.IESCoord2XYZ(horizontalResampled, verticalResampled, resampledIntensity, IES_MaxLength)

    def readIESfile(self, fileName):
        """Read *fileName* and return every numeric token after TILT=NONE as a flat list of strings."""
        # ISO-8859-1 tolerates stray 8-bit bytes; 'with' guarantees the handle
        # closes even if parsing raises (the original leaked it on exceptions).
        allValues = ""
        startReading = 0
        with open(fileName, encoding="ISO-8859-1") as f:
            line = f.readline()
            while line:
                # A blank line terminates the data block.
                if not line.strip():
                    break
                # The numeric payload starts on the line after the TILT=NONE marker.
                if "TILT=NONE" in line.strip():
                    line = f.readline()
                    startReading = 1
                if startReading:
                    allValues = allValues + line
                line = f.readline()
        # One flat array with all whitespace-separated tokens.
        return re.split(r'\s+', allValues.strip())

    def getIESproperties(self, allValues):
        """Extract angles, intensity grid and luminaire shape from the token list.

        Returns (verticalAngles, horizontalAngles, intensities, width, length,
        radius) with dimensions converted to meters.
        """
        FEET2METER = 0.3048
        width = 0
        length = 0
        radius = 0
        # Token layout (IES LM-63): [3] = vertical angle count, [4] = horizontal
        # angle count, [6] = unit (1 feet / 2 meter), [7]/[8] = shape dimensions.
        numberVerticalAngle = int(allValues[3])
        numberHorizontalAngle = int(allValues[4])
        # A negative first dimension encodes a disk (radius); otherwise a rectangle.
        # BUGFIX: tokens are strings and must be converted before arithmetic —
        # the previous code did 'allValues[7]*-1' (string repetition) and later
        # multiplied strings by FEET2METER, raising TypeError for feet units.
        if float(allValues[7]) < 0:
            radius = -float(allValues[7])
        else:
            width = float(allValues[7])
            length = float(allValues[8])
        # Convert dimensions to meters when measured in feet.
        if float(allValues[6]) == 1:
            radius = radius * FEET2METER
            width = width * FEET2METER
            length = length * FEET2METER
        # The actual vertical and horizontal angles measured.
        verticalAngles = list(map(float, allValues[13:13 + numberVerticalAngle]))
        horizontalAngles = list(map(float, allValues[13 + numberVerticalAngle:13 + numberVerticalAngle + numberHorizontalAngle]))
        # Intensities as a (horizontal x vertical) grid.
        intensities = np.array(allValues[13 + numberVerticalAngle + numberHorizontalAngle:len(allValues)])
        intensities = intensities.reshape(numberHorizontalAngle, numberVerticalAngle).astype(np.float16)
        return verticalAngles, horizontalAngles, intensities, width, length, radius

    def mirrorAngles(self, horizontalAngles, intensities):
        """Expand symmetric IES measurements to the full 0-360 horizontal range.

        Files may store only 0 (rotationally symmetric), 0-90, 0-180 or the full
        0-360 sweep; missing quadrants are mirrored/copied here.
        """
        if horizontalAngles[-1] == 0:
            horizontalAnglesMirrored = list(np.arange(0, 361, DEFAULT_HORIZONTAL_STEP))
        else:
            horizontalAnglesMirrored = list(np.arange(0, 361, horizontalAngles[-1] / (len(horizontalAngles) - 1)))
        if horizontalAngles[-1] == 90:
            # Mirror 0-90 to 0-180, then 0-180 to 0-360.
            a = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)
            intensityMirrored = np.concatenate((a, np.flip(a, 0)[1:]), axis=0)
        elif horizontalAngles[-1] == 180:
            intensityMirrored = np.concatenate((intensities, np.flip(intensities, 0)[1:]), axis=0)
        elif horizontalAngles[-1] == 0:
            # Rotationally symmetric: repeat the single slice all the way around.
            intensityMirrored = np.array(([intensities[0], ] * len(np.arange(0, 361, DEFAULT_HORIZONTAL_STEP))))
        else:
            # Already covers the full 360 sweep.
            intensityMirrored = intensities
        return horizontalAnglesMirrored, intensityMirrored

    def IESCoord2XYZ(self, horizontalAngles, verticalAngles, intensity, maxLength):
        """Convert spherical intensity samples to cartesian points, uniformly
        scaled so the farthest point lies within *maxLength*."""
        maxValue = np.amax(intensity)
        if maxValue > maxLength:
            intensity = intensity * (maxLength / maxValue)
        for index, horizontalAngle in enumerate(horizontalAngles):
            # Omniverse and 3ds Max render the light upside down, so the
            # horizontal rotation direction is flipped (negative azimuth).
            if index == 0:
                points = np.array(spherical_to_cartesian(intensity[index].tolist(), [math.radians(90 - x) for x in verticalAngles], [math.radians(-1 * horizontalAngle)] * len(verticalAngles))).transpose()
            else:
                newPoints = np.array(spherical_to_cartesian(intensity[index], [math.radians(90 - x) for x in verticalAngles], [math.radians(-1 * horizontalAngle)] * len(verticalAngles))).transpose()
                points = np.concatenate((points, newPoints), axis=0)
        # Flip z for the same upside-down reason.
        points[:, 2] *= -1
        return points

    def interpolateIESValues(self, originalHorizontalAngles, originalVerticalAngles, newHorizontalAngles, newVerticalAngles, intensity):
        """Bilinearly resample the intensity grid onto the new angle axes.

        NOTE(review): scipy.interpolate.interp2d is deprecated (removed in
        SciPy 1.14); migrating to RegularGridInterpolator is the long-term fix
        but is left untouched here to preserve behaviour.
        """
        fun = interpolate.interp2d(originalVerticalAngles, originalHorizontalAngles, intensity, kind='linear')
        interpolatedIntensity = fun(newVerticalAngles, newHorizontalAngles)
        return interpolatedIntensity
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/add_model.py | import os
from pathlib import Path
from typing import List
import omni
from omni.client import make_relative_url
from omni.kit.window.file_importer import get_file_importer
from omni.kit.window.file_exporter import get_file_exporter
import omni.usd as usd
from pxr import UsdGeom, Usd, Sdf
from ekozerski.rtxremixtools.utils import find_inst_hash_prim, find_source_mesh_hash_prim
from ekozerski.rtxremixtools.commons import log_info
from ekozerski.rtxremixtools import mesh_utils
class UserCache:
    """Session-scoped cache of UI state shared across dialog invocations."""
    # Directory of the model most recently picked in the import dialog;
    # None until the user imports a model for the first time.
    LAST_OPENED_MODEL = None
def open_export_dialog_for_captured_mesh(prim_path, mesh):
    """Show an export dialog that saves a cleaned copy of *mesh* and references it in the stage.

    NOTE(review): this definition is dead code — a later module-level function
    of the same name (defined further down this file) shadows it at import time.
    """
    def setup_references_in_stage(current_stage, reference_file_location):
        # Derive the mesh hash from the parent prim's name (mesh_HASH_...).
        _, mesh_hash, __ = Usd.Prim.GetName(mesh.GetParent()).split('_')
        xform_prim_path = f'/RootNode/meshes/mesh_{mesh_hash}/Xform_{mesh_hash}_0'
        omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=xform_prim_path)
        editing_layer = current_stage.GetEditTarget().GetLayer()
        # Reference by a path relative to the edit layer so the mod stays relocatable.
        relative_file_path = make_relative_url(editing_layer.realPath, reference_file_location)
        omni.kit.commands.execute('AddReference',
            stage=current_stage,
            prim_path=Sdf.Path(xform_prim_path),
            reference=Sdf.Reference(relative_file_path)
        )
        # Reload the source layer and select the newly created Xform.
        selection = omni.usd.get_context().get_selection()
        selection.clear_selected_prim_paths()
        source_layer = mesh.GetPrimStack()[-1].layer
        source_layer.Reload()
        selection.set_selected_prim_paths([xform_prim_path], False)
    def file_export_handler(filename: str, dirname: str, extension: str = "", selections: List[str] = []):
        # Callback signature (including the mutable default) is dictated by the
        # file-exporter API; 'selections' is unused here.
        stage = Usd.Stage.CreateInMemory()
        root_xform = UsdGeom.Xform.Define(stage, '/root').GetPrim()
        stage.SetDefaultPrim(root_xform)
        new_mesh = UsdGeom.Mesh.Define(stage, f'/root/{prim_path.rsplit("/", 1)[-1]}')
        # Copy only the geometry attributes that carry authored values.
        needed_attr_names = ['doubleSided', 'extent', 'faceVertexCounts', 'faceVertexIndices', 'normals', 'points', 'primvars:st']
        [
            new_mesh.GetPrim().CreateAttribute(attr.GetName(), attr.GetTypeName()).Set(attr.Get())
            for attr in mesh.GetAttributes()
            if attr.Get() and attr.GetName() in needed_attr_names
        ]
        mesh_utils.convert_mesh_to_vertex_interpolation_mode(new_mesh)
        # Match the up axis of the currently open stage so the copy doesn't flip.
        ctx = usd.get_context()
        current_stage = ctx.get_stage()
        upAxis = UsdGeom.GetStageUpAxis(current_stage)
        UsdGeom.SetStageUpAxis(stage, upAxis)
        save_location = dirname + filename + extension
        stage.Export(save_location)
        setup_references_in_stage(current_stage, save_location)
        log_info(f"> Exporting {prim_path} in '{save_location}'")
    # Default the dialog into rtx-remix/mods/gameReadyAssets when the source
    # layer lives inside an rtx-remix tree; otherwise next to the source layer.
    source_layer = mesh.GetPrimStack()[-1].layer
    rtx_remix_path_parts = source_layer.realPath.split(os.path.join("rtx-remix"), 1)
    if len(rtx_remix_path_parts) > 1:
        rtx_remix_path = os.path.join(rtx_remix_path_parts[0], "rtx-remix", "mods", "gameReadyAssets")
    else:
        rtx_remix_path = source_layer.realPath
    rtx_remix_path = os.path.join(rtx_remix_path, "CustomMesh")
    file_exporter = get_file_exporter()
    file_exporter.show_window(
        title=f'Export "{prim_path}"',
        export_button_label="Save",
        export_handler=file_export_handler,
        filename_url=rtx_remix_path,
    )
def copy_original_mesh(prim_path, mesh, output_path):
    """Write a cleaned copy of *mesh* (geometry attributes only) to *output_path*.

    The copy lives under /root in a fresh stage, is converted to vertex
    interpolation mode, and inherits the open stage's up axis.
    """
    export_stage = Usd.Stage.CreateInMemory()
    root_prim = UsdGeom.Xform.Define(export_stage, '/root').GetPrim()
    export_stage.SetDefaultPrim(root_prim)
    mesh_name = prim_path.rsplit("/", 1)[-1]
    mesh_copy = UsdGeom.Mesh.Define(export_stage, f'/root/{mesh_name}')
    wanted_attrs = ['doubleSided', 'extent', 'faceVertexCounts', 'faceVertexIndices', 'normals', 'points', 'primvars:st']
    # Copy only the geometry attributes that carry authored values.
    for attr in mesh.GetAttributes():
        if attr.Get() and attr.GetName() in wanted_attrs:
            mesh_copy.GetPrim().CreateAttribute(attr.GetName(), attr.GetTypeName()).Set(attr.Get())
    mesh_utils.convert_mesh_to_vertex_interpolation_mode(mesh_copy)
    # Match the up axis of the currently open stage so the copy doesn't flip.
    current_stage = usd.get_context().get_stage()
    UsdGeom.SetStageUpAxis(export_stage, UsdGeom.GetStageUpAxis(current_stage))
    export_stage.Export(output_path)
def setup_references_in_stage(mesh, current_stage, reference_file_location):
    """Create an Xform under the mesh_HASH prim referencing the exported file, then reload and select it."""
    # Derive the mesh hash from the inst_HASH_N ancestor of the instance prim.
    instance_prim = find_inst_hash_prim(mesh)
    _, mesh_hash, __ = Usd.Prim.GetName(instance_prim).split('_')
    # Name the Xform after the exported file (without extension).
    export_prim_name = os.path.basename(reference_file_location).split('.', 1)[0]
    xform_path = f'/RootNode/meshes/mesh_{mesh_hash}/{export_prim_name}'
    omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=xform_path)
    # Reference by a path relative to the edit layer so the mod stays relocatable.
    edit_layer = current_stage.GetEditTarget().GetLayer()
    relative_file_path = make_relative_url(edit_layer.realPath, reference_file_location)
    omni.kit.commands.execute('AddReference',
        stage=current_stage,
        prim_path=Sdf.Path(xform_path),
        reference=Sdf.Reference(relative_file_path)
    )
    # Reload the defining layer so the new reference is picked up.
    mesh.GetPrimStack()[-1].layer.Reload()
    # Select the freshly created Xform.
    selection = omni.usd.get_context().get_selection()
    selection.clear_selected_prim_paths()
    selection.set_selected_prim_paths([xform_path], False)
def open_export_dialog_for_captured_mesh(prim_path, mesh):
    """Show a file-export dialog that saves *mesh* as a replacement asset and wires it into the stage."""
    def export_mesh(filename: str, dirname: str, extension: str = "", selections: List[str] = []):
        # Callback signature (including the mutable default) is dictated by the exporter API.
        destination = dirname + filename + extension
        copy_original_mesh(prim_path, mesh, destination)
        stage = usd.get_context().get_stage()
        setup_references_in_stage(mesh, stage, destination)
    # Default the dialog into rtx-remix/mods/gameReadyAssets when the source
    # layer lives inside an rtx-remix tree; otherwise next to the source layer.
    source_layer = mesh.GetPrimStack()[-1].layer
    default_path = source_layer.realPath
    path_parts = default_path.split(os.path.join("rtx-remix"), 1)
    if len(path_parts) > 1:
        default_path = os.path.join(path_parts[0], "rtx-remix", "mods", "gameReadyAssets")
    default_path = os.path.join(default_path, "CustomMesh")
    exporter = get_file_exporter()
    exporter.show_window(
        title=f'Export "{prim_path}"',
        export_button_label="Save",
        export_handler=export_mesh,
        filename_url=default_path,
    )
def open_import_dialog_for_add_models(prim_path):
    """Show an import dialog and add each picked USD file as a referenced Xform
    under the mesh_HASH prim corresponding to *prim_path*.
    """
    def import_mesh(filename: str, dirname: str, selections: List[str] = []):
        # Callback signature (including the mutable default) is dictated by the importer API.
        # TODO: Loop through all selections and add them all to the mesh_HASH with their respective xforms correctly named without collisions.
        mesh_path = mesh.GetPath().pathString
        new_selection = list()
        counter = 0
        for reference_file in selections:
            # Bump the numeric suffix until the Xform name is free.
            xform_name = Path(reference_file).stem
            new_mesh_path = mesh_path + f'/{xform_name}_{counter}'
            while current_stage.GetPrimAtPath(new_mesh_path).IsValid():
                counter += 1
                new_mesh_path = mesh_path + f'/{xform_name}_{counter}'
            omni.kit.commands.execute('CreatePrim', prim_type='Xform', prim_path=new_mesh_path)
            # Reference by a path relative to the edit layer so the mod stays relocatable.
            editing_layer = current_stage.GetEditTarget().GetLayer()
            relative_file_path = make_relative_url(editing_layer.realPath, reference_file)
            omni.kit.commands.execute('AddReference',
                stage=current_stage,
                prim_path=Sdf.Path(new_mesh_path),
                reference=Sdf.Reference(relative_file_path)
            )
            new_selection.append(new_mesh_path)
            # Remember the directory so the next dialog opens where the user left off.
            UserCache.LAST_OPENED_MODEL = os.path.dirname(reference_file)
            counter += 1
        # Reload the defining layer and select everything that was added.
        source_layer = mesh.GetPrimStack()[-1].layer
        source_layer.Reload()
        selection = omni.usd.get_context().get_selection()
        selection.clear_selected_prim_paths()
        selection.set_selected_prim_paths(new_selection, False)
    ctx = usd.get_context()
    current_stage = ctx.get_stage()
    # Resolve the selected prim to its mesh_HASH source prim.
    inst_prim = current_stage.GetPrimAtPath(prim_path)
    mesh = find_source_mesh_hash_prim(current_stage, inst_prim)
    source_layer = mesh.GetPrimStack()[-1].layer
    # Start in the last-used directory when available.
    filename_url = UserCache.LAST_OPENED_MODEL if UserCache.LAST_OPENED_MODEL is not None else source_layer.realPath
    file_importer = get_file_importer()
    file_importer.show_window(
        title=f'Import Models',
        import_button_label="Import",
        import_handler=import_mesh,
        filename_url=filename_url,
    )
def open_add_model_dialog():
    """Open a model-import dialog for every currently selected prim path."""
    selected_paths = usd.get_context().get_selection().get_selected_prim_paths()
    for selected_path in selected_paths:
        open_import_dialog_for_add_models(selected_path)
def open_mesh_replacement_setup_dialog():
    """Open an export dialog for each selected mesh that comes from a capture."""
    for mesh_path, mesh_prim in mesh_utils.get_selected_mesh_prims().items():
        if not mesh_utils.is_a_captured_mesh(mesh_prim):
            continue
        open_export_dialog_for_captured_mesh(mesh_path, mesh_prim)
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/commons.py | import carb
def log_info(msg: str):
    """Log *msg* at info level with the RTX Remix Tool prefix."""
    message = f"[RTX Remix Tool] {msg}"
    carb.log_info(message)
def log_warn(msg: str):
    """Log *msg* at warning level with the RTX Remix Tool prefix."""
    message = f"[RTX Remix Tool] {msg}"
    carb.log_warn(message)
def log_error(msg: str):
    """Log *msg* at error level with the RTX Remix Tool prefix."""
    message = f"[RTX Remix Tool] {msg}"
    carb.log_error(message)
| 227 | Python | 15.285713 | 45 | 0.621145 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/extension.py | import omni.ext
import omni.ui as ui
from omni.kit import context_menu
from omni.kit.hotkeys.core import get_hotkey_registry
from omni.kit.actions.core import get_action_registry
from . import commons
from .rtx_context_menu import build_rtx_remix_menu
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class RtxRemixTools(omni.ext.IExt):
    """Kit extension wiring up the RTX Remix context menu, actions and hotkey."""

    def on_startup(self, ext_id):
        self.ext_id = ext_id
        commons.log_info("Starting Up")
        # Root "RTX Remix" entry added to the stage/viewport context menu.
        menu_entry = {"name": "RTX Remix", "populate_fn": build_rtx_remix_menu}
        self._context_menu_subscription = context_menu.add_menu(menu_entry, "MENU", "")
        register_actions(self.ext_id)
        self.hotkey_registry = get_hotkey_registry()
        # SHIFT+F jumps from an instance prim to its source mesh_HASH prim.
        self.select_source_mesh_hotkey = self.hotkey_registry.register_hotkey(
            self.ext_id,
            "SHIFT + F",
            self.ext_id,
            "select_source_mesh",
            filter=None,
        )

    def on_shutdown(self):
        commons.log_info("Shutting Down")
        # Release the context menu subscription so the entry disappears.
        self._context_menu_subscription.release()
        self.hotkey_registry.deregister_hotkey(self.select_source_mesh_hotkey)
        deregister_actions(self.ext_id)
def register_actions(extension_id):
    """Register this extension's actions under the 'RTX Remix Tools Actions' tag."""
    # Imported lazily to avoid a cycle at module import time.
    from . import select_source_mesh
    registry = get_action_registry()
    registry.register_action(
        extension_id,
        "select_source_mesh",
        select_source_mesh.select_source_meshes,
        display_name="Select Source Mesh",
        description="Selects the corresponding mesh_HASH the prim is related to.",
        tag="RTX Remix Tools Actions",
    )
def deregister_actions(extension_id):
    """Remove every action this extension registered."""
    get_action_registry().deregister_all_actions_for_extension(extension_id)
| 2,043 | Python | 31.444444 | 118 | 0.664709 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/mesh_utils.py | from collections import OrderedDict
import os
from pxr import UsdGeom, Usd, Sdf
import omni.usd as usd
from ekozerski.rtxremixtools.commons import log_error
def get_selected_mesh_prims():
    """Return {prim_path: prim} for every selected prim that is a UsdGeom.Mesh."""
    ctx = usd.get_context()
    stage = ctx.get_stage()
    meshes = {}
    for path in ctx.get_selection().get_selected_prim_paths():
        prim = stage.GetPrimAtPath(path)
        # UsdGeom.Mesh(prim) is falsy for prims that aren't meshes.
        if UsdGeom.Mesh(prim):
            meshes[path] = prim
    return meshes
def convert_mesh_to_vertex_interpolation_mode(mesh):
    """
    This method attempts to convert Remix meshes' interpolation mode from constant or faceVarying to vertex.
    If there is any faceVarying attribute, it means the data arrays (points, uvs, normals...) will have different
    lengths, so this script will copy data around using the faceVertexIndices array to ensure they all end up with the
    same length.
    """
    # TODO: Study interpolation modes in depth to implement a decent conversion script.
    prim = mesh.GetPrim()
    primvar_api = UsdGeom.PrimvarsAPI(prim)
    primvars = {var for var in primvar_api.GetPrimvars()}
    face_varying_primvars = [v for v in primvars if v.GetInterpolation() == UsdGeom.Tokens.faceVarying]
    if face_varying_primvars or mesh.GetNormalsInterpolation() == UsdGeom.Tokens.faceVarying:
        # Only vertex/constant primvars need re-expansion; uniform (per-face)
        # primvars are left alone since indexing them per-vertex would be wrong.
        non_face_varying_primvars = list(primvars.difference(face_varying_primvars))
        non_face_varying_primvars = [var for var in non_face_varying_primvars if var.GetInterpolation() != 'uniform']
        indices = prim.GetAttribute("faceVertexIndices")
        # Settings points separately since it doesn't have a "SetInterpolation" like primvars.
        points = prim.GetAttribute("points")
        points_arr = points.Get()
        # Expand points so there is one entry per face-vertex.
        new_arr = [points_arr[i] for i in indices.Get()]
        points.Set(new_arr)
        # Expand every remaining authored primvar the same way.
        for var in non_face_varying_primvars:
            original_arr = var.Get()
            if original_arr:
                new_arr = [original_arr[i] for i in indices.Get()]
                var.Set(new_arr)
        # After expansion the index buffer becomes the trivial 0..N-1 sequence.
        indices.Set([i for i in range(len(indices.Get()))])
        # All primvars (and normals) now share the same length: mark them vertex-interpolated.
        [var.SetInterpolation(UsdGeom.Tokens.vertex) for var in primvars]
        mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
def convert_uv_primvars_to_st(mesh):
    """Collapse all recognized UV primvars into a single 'primvars:st', the only
    UV set the RTX Remix runtime reads.
    """
    # https://github.com/NVIDIAGameWorks/dxvk-remix/blob/ebb0ecfd638d6a32ab5f10708b5b07bc763cf79b/src/dxvk/rtx_render/rtx_mod_usd.cpp#L696
    # https://github.com/Kim2091/RTXRemixTools/blob/8ae25224ef8d1d284f3e208f671b2ce6a35b82af/RemixMeshConvert/For%20USD%20Composer/RemixMeshConvert_OV.py#L4
    # UV primvar names commonly produced by DCC tools.
    known_uv_names = [
        'primvars:st',
        'primvars:uv',
        'primvars:st0',
        'primvars:st1',
        'primvars:st2',
        'primvars:UVMap',
        'primvars:UVChannel_1',
        'primvars:map1',
    ]
    # Preserving the order of found primvars to use the first one, in case a primvars:st can't be found.
    primvar_api = UsdGeom.PrimvarsAPI(mesh)
    uv_primvars = OrderedDict(
        (primvar.GetName(), primvar)
        for primvar in primvar_api.GetPrimvars()
        if primvar.GetTypeName().role == 'TextureCoordinate'
        or primvar.GetName() in known_uv_names
    )
    if not uv_primvars:
        return
    # Picking only one UV and blowing up everything else as the runtime only reads the first anyway.
    considered_uv = uv_primvars.get('primvars:st') or next(iter(uv_primvars.values()))
    uv_data = considered_uv.Get()
    # Remove every UV primvar, including the one being kept (recreated below).
    [primvar_api.RemovePrimvar(uv_name) for uv_name in uv_primvars.keys()]
    # Recreating the primvar with appropriate name, type and role
    new_uv_primvar = primvar_api.CreatePrimvar('primvars:st', Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.vertex)
    new_uv_primvar.Set(uv_data)
def remove_unused_primvars(mesh):
    """Strip display primvars that serve no purpose in exported Remix meshes."""
    primvar_api = UsdGeom.PrimvarsAPI(mesh)
    for primvar_name in ('primvars:displayColor', 'primvars:displayOpacity'):
        primvar_api.RemovePrimvar(primvar_name)
def fix_meshes_in_file(usd_file_path):
    """Open *usd_file_path*, normalize every triangulated mesh in it, and save.

    Non-triangulated meshes are reported via log_error and skipped.
    """
    stage = Usd.Stage.Open(usd_file_path)
    # Materialize the prim list before mutating attributes during the loop.
    mesh_prims = [prim for prim in stage.TraverseAll() if UsdGeom.Mesh(prim)]
    for prim in mesh_prims:
        face_counts = prim.GetAttribute("faceVertexCounts").Get()
        if not face_counts or any(count != 3 for count in face_counts):
            log_error(f"Mesh {prim.GetPath()} in '{usd_file_path}' hasn't been triangulated and this tools doesn't do that for you yet :(")
            continue
        mesh = UsdGeom.Mesh(prim)
        convert_mesh_to_vertex_interpolation_mode(mesh)
        convert_uv_primvars_to_st(mesh)
        remove_unused_primvars(mesh)
    stage.Save()
def is_a_captured_mesh(mesh):
    """
    Returns True if the Mesh's defining USD file is located in the captures folder.
    """
    defining_layer_path = mesh.GetPrimStack()[-1].layer.realPath
    return os.path.normpath("captures/meshes") in os.path.normpath(defining_layer_path)
def fix_meshes_geometry():
    """Normalize the source files of every selected mesh that is not from a capture."""
    for path, mesh in get_selected_mesh_prims().items():
        # Captured meshes are read-only inputs; only fix replacement assets.
        if is_a_captured_mesh(mesh):
            continue
        layer = mesh.GetPrimStack()[-1].layer
        fix_meshes_in_file(layer.realPath)
        layer.Reload()
| 5,330 | Python | 39.082706 | 156 | 0.67955 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/add_material.py | import os
from typing import List
from omni import usd, kit
from omni.kit.window.file_importer import get_file_importer
from omni.client import make_relative_url
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage):
    """Show a file picker for an MDL file and create a material prim for it under *mesh_hash*.

    Args:
        mesh_hash: the mesh_HASH prim the new material prim is parented to.
        ctx: the current usd context (unused here, kept for the caller's signature).
        current_stage: the stage the material prim is created in.
    """
    def create_material_from_mdl_file(filename: str, dirname: str, selections: List[str] = []):
        # Callback signature (including the mutable default) is dictated by the importer API.
        if not filename.endswith('mdl'):
            # BUGFIX: interpolate the offending file name into the message
            # (the placeholder was missing from this f-string).
            raise ValueError(f"The selected file '{filename}' doesn't have a mdl extension.")
        mesh_hash_path = mesh_hash.GetPath().pathString
        # Bump the numeric suffix until the material prim name is free.
        material_name = os.path.basename(filename).replace('.mdl', '')
        counter = 0
        new_material_path = mesh_hash_path + f'/{material_name}_{counter}'
        while current_stage.GetPrimAtPath(new_material_path).IsValid():
            counter += 1
            new_material_path = mesh_hash_path + f'/{material_name}_{counter}'
        # TODO: Get material name by inspecting the MDL file rather than guessing from it's
        # name, so users can rename it at will.
        mtl_name = 'AperturePBR_Opacity' if 'Opacity' in filename else 'AperturePBR_Translucent'
        # Reference the MDL by a path relative to the edit layer so the mod stays relocatable.
        editing_layer = current_stage.GetEditTarget().GetLayer()
        relative_file_path = make_relative_url(editing_layer.realPath, os.path.join(dirname, filename))
        success, _ = kit.commands.execute('CreateMdlMaterialPrimCommand',
            mtl_url=relative_file_path,
            mtl_name=mtl_name,
            mtl_path=new_material_path,
            select_new_prim=True,
        )

    def filter_handler(filename: str, _, extension_option):
        # Only restrict the listing while the '.mdl' filter option is active.
        if extension_option == '.mdl':
            return filename.lower().endswith('.mdl')
        return True

    file_importer = get_file_importer()
    file_importer.show_window(
        title=f'Select MDL File',
        import_button_label="Select",
        import_handler=create_material_from_mdl_file,
        file_extension_types=[(".mdl", "Opacity or Translucent MDL file")],
        file_filter_handler=filter_handler,
    )
def open_add_material_dialog():
    """Open the MDL picker once per distinct mesh_HASH among the selected prims."""
    ctx = usd.get_context()
    current_stage = ctx.get_stage()
    # Deduplicate: several selected prims may resolve to the same mesh_HASH.
    mesh_hashes = set()
    for path in ctx.get_selection().get_selected_prim_paths():
        prim = current_stage.GetPrimAtPath(path)
        source_mesh = find_source_mesh_hash_prim(current_stage, prim)
        if source_mesh is not None:
            mesh_hashes.add(source_mesh)
    for mesh_hash in mesh_hashes:
        open_add_material_dialog_for_prim(mesh_hash, ctx, current_stage)
| 2,650 | Python | 41.079364 | 112 | 0.659245 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/utils.py | from pxr import Usd
from omni import usd
def find_source_mesh_hash_prim(current_stage, prim):
    """Map *prim* (typically under /RootNode/instances) to its /RootNode/meshes/mesh_HASH prim.

    Stages without the Remix capture layout return *prim* unchanged.
    """
    if not current_stage.GetPrimAtPath('/RootNode/meshes'):
        return prim
    stop_paths = ('/RootNode/meshes', '/RootNode/instances')
    cursor = prim
    # Climb until the parent is one of the two Remix root containers.
    while cursor.GetParent().IsValid() and cursor.GetParent().GetPath().pathString not in stop_paths:
        cursor = cursor.GetParent()
    if not cursor:
        return None
    name = Usd.Prim.GetName(cursor)
    # Already a mesh_HASH prim: nothing to resolve.
    if 'mesh_' in name:
        return cursor
    # inst_HASH_N -> extract HASH and look up the matching mesh prim.
    _, mesh_hash, __ = name.split('_')
    return current_stage.GetPrimAtPath(f'/RootNode/meshes/mesh_{mesh_hash}')
def find_inst_hash_prim(instance_mesh):
    """Return the inst_HASH_N ancestor of *instance_mesh* sitting directly under /RootNode/instances."""
    cursor = instance_mesh
    # Climb until the parent is the instances container.
    while cursor.GetParent().IsValid() and cursor.GetParent().GetPath().pathString != '/RootNode/instances':
        cursor = cursor.GetParent()
    if not cursor:
        return None
    return cursor
| 1,075 | Python | 29.742856 | 112 | 0.665116 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/preserve_draw_calls.py | from omni import usd, kit
from pxr import Sdf
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def set_preserve_original_draw_call(enabled: bool = False):
    """Stamp a 'preserveOriginalDrawCall' int attribute (1/0) on each selected mesh_HASH prim."""
    context = usd.get_context()
    stage = context.get_stage()
    selected_paths = context.get_selection().get_selected_prim_paths()
    prims = [stage.GetPrimAtPath(path) for path in selected_paths]
    # Resolve each selection to its mesh_HASH source prim, dropping misses.
    hash_prims = {find_source_mesh_hash_prim(stage, prim) for prim in prims}
    hash_prims.discard(None)
    attr_value = 1 if enabled else 0
    for hash_prim in hash_prims:
        kit.commands.execute(
            'CreateUsdAttributeCommand',
            prim=hash_prim,
            attr_name='preserveOriginalDrawCall',
            attr_type=Sdf.ValueTypeNames.Int,
            attr_value=attr_value,
        )
| 880 | Python | 32.884614 | 105 | 0.664773 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/rtx_context_menu.py | from omni.kit.ui import get_custom_glyph_code
from omni import usd
import omni.ui as ui
from . import mesh_utils
from . import add_model
from . import add_material
from . import preserve_draw_calls
from . import select_source_mesh
def _build_fix_mesh_geometry_menu_item():
    """Menu entry that converts selected non-captured meshes to 'vertex' interpolation."""
    tooltip = (
        'Interpolation Mode\n'
        'OBS: Operation Can\'t be undone\n'
        ' RTX Remix runtime only supports "vertex" interpolation mode, in which "points", "normals" and "uvs" arrays '
        'must have the same length, but DCC tools usually export the mesh using "faceVarying" interpolation mode.'
        'This operation reorganizes the geometry to be compatible with the runtime. See:\n'
        ' "Interpolation of Geometric Primitive Variables" - https://openusd.org/dev/api/class_usd_geom_primvar.html'
        '\n\nThis operation only applies for meshes inside the mods folder, not the captured ones.'
    )
    # Only enabled when at least one selected mesh is a replacement (non-captured) mesh.
    has_editable_mesh = any(
        not mesh_utils.is_a_captured_mesh(mesh)
        for mesh in mesh_utils.get_selected_mesh_prims().values()
    )
    ui.MenuItem(
        "Fix Meshes Geometry",
        triggered_fn=mesh_utils.fix_meshes_geometry,
        enabled=has_editable_mesh,
        tooltip=tooltip,
    )
def _build_setup_for_mesh_replacements_menu_item():
    """Menu entry that exports a captured mesh and wires references for replacement work."""
    tooltip = (
        "Export the original mesh to a selected location and setup the references to work within the runtime so you"
        " can focus on remodeling the mesh and export back at the same location."
    )
    # Only captured meshes can be set up for replacement.
    has_captured_mesh = any(
        mesh_utils.is_a_captured_mesh(mesh)
        for mesh in mesh_utils.get_selected_mesh_prims().values()
    )
    ui.MenuItem(
        "Setup for Mesh Replacement",
        triggered_fn=add_model.open_mesh_replacement_setup_dialog,
        enabled=has_captured_mesh,
        tooltip=tooltip,
    )
def _build_add_model_menu_item():
    """Menu entry that attaches externally authored meshes to the selected prim."""
    tooltip = "Add external authored meshes to the prim, setting up properly to work within the runtime."
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Add Model",
        triggered_fn=add_model.open_add_model_dialog,
        tooltip=tooltip,
        enabled=has_selection,
    )
def _build_add_material_menu_item():
    """Menu entry that adds an MDL-file-based material to the selected prim."""
    tooltip = "Add a material defined from an external MDL file to the selected prim."
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Add Material",
        triggered_fn=add_material.open_add_material_dialog,
        tooltip=tooltip,
        enabled=has_selection,
    )
def _build_preserve_original_draw_call_menu_item():
    """Menu entry that sets preserveOriginalDrawCall=1 on the selected mesh_HASH prims."""
    tooltip = (
        "Add a 'custom int preserveOriginalDrawCall' attribute set to '1' to the mesh_HASH prim. Used to indicate to"
        " the runtime whether it should keep rendering the original mesh or not. Should be set when adding custom "
        " lights without removing the original mesh from rendering."
    )
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Preserve",
        triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(True),
        tooltip=tooltip,
        enabled=has_selection,
    )
def _build_dont_preserve_original_draw_call_menu_item():
    """Menu entry that sets preserveOriginalDrawCall=0 on the selected mesh_HASH prims."""
    tooltip = (
        "Add a 'custom int preserveOriginalDrawCall' attribute set to '0' to the mesh_HASH prim. Used to indicate to"
        " the runtime whether it should keep rendering the original mesh or not. Should be set when adding custom "
        " lights without removing the original mesh from rendering."
    )
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Don't Preserve",
        triggered_fn=lambda: preserve_draw_calls.set_preserve_original_draw_call(False),
        tooltip=tooltip,
        enabled=has_selection,
    )
def _build_select_source_meshes_menu():
    """Menu entry that jumps the selection to the corresponding mesh_HASH prim."""
    tooltip = "Selects the corresponding mesh_HASH the prim is related to."
    has_selection = bool(usd.get_context().get_selection().get_selected_prim_paths())
    ui.MenuItem(
        "Select Source Mesh (Shift + F)",
        triggered_fn=select_source_mesh.select_source_meshes,
        tooltip=tooltip,
        enabled=has_selection,
    )
def build_rtx_remix_menu(event):
    """Assemble the 'RTX Remix' context menu with every tool entry."""
    icon = get_custom_glyph_code("${glyphs}/menu_create.svg")
    with ui.Menu(f' {icon} RTX Remix'):
        _build_fix_mesh_geometry_menu_item()
        _build_setup_for_mesh_replacements_menu_item()
        _build_add_model_menu_item()
        _build_add_material_menu_item()
        with ui.Menu('Original Draw Call Preservation'):
            _build_preserve_original_draw_call_menu_item()
            _build_dont_preserve_original_draw_call_menu_item()
        _build_select_source_meshes_menu()
| 4,736 | Python | 37.201613 | 122 | 0.652872 |
Ekozmaster/NvidiaOmniverseRTXRemixTools/exts/ekozerski.rtxremixtools/ekozerski/rtxremixtools/select_source_mesh.py | from omni import usd
from ekozerski.rtxremixtools.utils import find_source_mesh_hash_prim
def select_source_meshes():
    """Replace the current selection with the mesh_HASH source prims of the selected prims."""
    context = usd.get_context()
    stage = context.get_stage()
    selected_paths = context.get_selection().get_selected_prim_paths()
    prims = [stage.GetPrimAtPath(path) for path in selected_paths]
    # Resolve each selected prim to its mesh_HASH prim, dropping misses.
    hash_prims = {find_source_mesh_hash_prim(stage, prim) for prim in prims}
    hash_prims.discard(None)
    target_paths = [prim.GetPath().pathString for prim in hash_prims]
    selection = usd.get_context().get_selection()
    selection.clear_selected_prim_paths()
    selection.set_selected_prim_paths(target_paths, False)
| 749 | Python | 34.714284 | 105 | 0.70494 |
rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/extension.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.ext
import omni.ui as ui
import omni.usd
from .RhinoComputeFunctions import RhinoFunctions, GrasshopperFunctions
from .RhinoComputUtil import SaveSelectedAs3dm
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    """Kit extension exposing Rhino.Compute mesh/Grasshopper operations in a small UI window."""

    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def __init__(self):
        # Base URL of the Rhino.Compute server; editable via the UI field created in on_startup.
        self.computeUrl = "http://localhost:6500/"
        # Export progress in [0, 1]; written by SaveAllas3DM during export.
        self.progressbarprog = 0
        self.progbarwindow = None
        # When True, SaveAllas3DM prunes the last group from the exported layer hierarchy.
        self.excludeLastGroupAsLayer = False

    def on_startup(self, ext_id):
        """Build the 'Rhino Compute Functions' window and wire each button to its operation."""
        # print("[omni.RhinoCompute] MyExtension startup")
        def serverAddrChanged(addr):
            # Keep the configured compute URL in sync with the text field.
            self.computeUrl = addr

        self._window = ui.Window("Rhino Compute Functions", width=300, height=400)
        with self._window.frame:
            with ui.VStack():
                ui.Label("Compute Server Address")
                serverAddrUi = ui.StringField(height=30)
                serverAddrUi.model.set_value(self.computeUrl)
                serverAddrUi.model.add_value_changed_fn(lambda m: serverAddrChanged(m.get_value_as_string()))
                with ui.CollapsableFrame("Util Functions", height=0):
                    with ui.VStack():
                        # NOTE(review): hard-coded output path "S:/test.3dm" looks like a debugging leftover — confirm.
                        ui.Button("save sel as 3dm", clicked_fn=lambda: SaveSelectedAs3dm(self, "S:/test.3dm"), height=40)
                        ui.Button("save all as 3dm", clicked_fn=lambda: RhinoFunctions.SaveAllAs3DM_UI(self), height=40)
                with ui.CollapsableFrame("Mesh Functions", height=0):
                    with ui.VStack():
                        # Each RhinoFunctions method is called unbound with this extension instance as `self`.
                        ui.Button("Volume", clicked_fn=lambda: RhinoFunctions.MeshVolume(self), height=40)
                        ui.Button("Mesh Bool Union", clicked_fn=lambda: RhinoFunctions.MeshBoolUnion(self), height=40)
                        ui.Button("Quad Remesh", clicked_fn=lambda: RhinoFunctions.MeshQuadRemesh(self), height=40)
                        ui.Button("Mesh Offset", clicked_fn=lambda: RhinoFunctions.MeshOffset(self), height=40)
                with ui.CollapsableFrame("Grasshopper Functions", height=0):
                    with ui.VStack():
                        ui.Button("Random Diamonds Script", clicked_fn=lambda: GrasshopperFunctions.randomDiamonds_UI(self), height=40)

    def on_shutdown(self):
        """Called when the extension is disabled."""
        print("[omni.RhinoCompute] MyExtension shutdown")
| 3,121 | Python | 51.915253 | 135 | 0.664851 |
rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/RhinoComputeFunctions.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import string
import omni.ext
import omni.ui as ui
from pxr import Usd, UsdGeom
import omni.usd
import carb.events
import omni.kit.app
import os
import json
import time
omni.kit.pipapi.install("rhino3dm")
from rhino3dm import *
# Bind the module name as well: GrasshopperFunctions references rhino3dm.CommonObject,
# which `from rhino3dm import *` alone does not make available.
import rhino3dm
omni.kit.pipapi.install("compute-rhino3d")
import compute_rhino3d.Util
import compute_rhino3d.Mesh
import compute_rhino3d.Grasshopper as gh
from .RhinoComputUtil import *
omni.kit.pipapi.install("plotly==5.4.0")
import plotly.graph_objects as go
class RhinoFunctions:
    """Mesh operations executed remotely on a Rhino.Compute server.

    NOTE(review): these methods are invoked unbound with the MyExtension
    instance passed as `self` (see extension.py), so `self` is the extension
    object (providing computeUrl, progressbarprog, excludeLastGroupAsLayer),
    not a RhinoFunctions instance.
    """

    def ComputeServerUrl(self):
        """Return the configured Rhino.Compute base URL."""
        return self.computeUrl

    def MeshVolume(self):
        """Compute the volume of each selected mesh via Rhino.Compute and chart them as a pie."""
        # add the compute server location
        compute_rhino3d.Util.url = self.computeUrl
        # convert selected items to rhino mesh
        meshes = convertSelectedUsdMeshToRhino()
        vols = []
        names = []
        rhinoMeshes = []
        # for each mesh compute the volume and then add the volume and name to a list
        for m in meshes:
            rhinoMeshes.append(m["Mesh"])
            vol = compute_rhino3d.Mesh.Volume(m["Mesh"])
            vols.append(vol)
            names.append(m["Name"])
        # use plotly to plot the volumes as a pie chart
        fig = go.Figure(
            data=[go.Pie(values=vols, labels=names)],
            layout_title_text="the Volumes"
        )
        fig.show()

    def MeshBoolUnion(self) -> None:
        """Boolean-union all selected meshes and author the results under /World/rhinoComputed."""
        # add the compute server location
        compute_rhino3d.Util.url = self.computeUrl
        # convert selected items to rhino mesh
        meshes = convertSelectedUsdMeshToRhino()
        # for each mesh compute the bool union
        rhinoMeshes = []
        for m in meshes:
            rhinoMeshes.append(m["Mesh"])
        rhinoMeshes = compute_rhino3d.Mesh.CreateBooleanUnion(rhinoMeshes)
        # add to the stage after converting back from rhino to USD mesh
        # ToDo: add UI to define prim path and names
        ct = 0
        # NOTE(review): ct is never incremented, so every result prim is named
        # BoolUnion_0 and overwrites the previous one — confirm and fix.
        for rm in rhinoMeshes:
            RhinoMeshToUsdMesh("/World/rhinoComputed/", f"BoolUnion_{ct}", rm)

    def MeshQuadRemesh(self) -> None:
        """Weld then QuadRemesh each selected mesh, authoring results under /World/rhinoComputed."""
        compute_rhino3d.Util.url = self.computeUrl
        meshes = convertSelectedUsdMeshToRhino()
        # setup all the params for quad remesh
        # ToDo: make this a UI for user
        parameters = {
            'AdaptiveQuadCount': True,
            'AdaptiveSize': 50.0,
            'DetectHardEdges': True,
            'GuideCurveInfluence': 0,
            'PreserveMeshArrayEdgesMode': 0,
            'TargetQuadCount': 2000
        }
        names = []
        rhinoMeshes = []
        for m in meshes:
            # Weld first so QuadRemesh sees a closed, shared-vertex mesh.
            weldVerts = compute_rhino3d.Mesh.Weld(m["Mesh"], 0.5)
            qrm = compute_rhino3d.Mesh.QuadRemesh(weldVerts, parameters)
            name = m["Name"]
            if qrm is not None:
                rhinoMeshes.append(qrm)
                names.append(name)
                RhinoMeshToUsdMesh("/World/rhinoComputed/", name + "_QuadRemesh", qrm)
            else:
                # NOTE(review): `warning` is not defined or imported in this module
                # (likely carb's warning was intended) — this branch would raise NameError; confirm.
                warning(f"QuadRemesh Failed on {name}")

    def MeshWeld(self, tol) -> None:
        """Weld each selected mesh with tolerance `tol`, authoring results under /World/rhinoComputed."""
        compute_rhino3d.Util.url = self.computeUrl
        meshes = convertSelectedUsdMeshToRhino()
        names = []
        rhinoMeshes = []
        for m in meshes:
            weldVerts = compute_rhino3d.Mesh.Weld(m["Mesh"], tol)
            name = m["Name"]
            if weldVerts is not None:
                rhinoMeshes.append(weldVerts)
                names.append(name)
                RhinoMeshToUsdMesh("/World/rhinoComputed/", name + "_Weld", weldVerts)
            else:
                # NOTE(review): `warning` is not defined or imported in this module — confirm.
                warning(f"Weld Failed on {name}")

    def MeshOffset(self) -> None:
        """Offset each selected mesh by 1 unit (solidified), authoring results under /World/rhinoComputed."""
        compute_rhino3d.Util.url = self.computeUrl
        meshes = convertSelectedUsdMeshToRhino()
        names = []
        rhinoMeshes = []
        for m in meshes:
            macf = compute_rhino3d.Mesh.Offset1(m["Mesh"], 1, True)
            rhinoMeshes.append(macf)
            name = m["Name"]
            names.append(name)
            RhinoMeshToUsdMesh("/World/rhinoComputed/", name + "_offset", macf)

    def SaveAllAs3DM_UI(self):
        """Show the 'Export Stage As 3DM' dialog (path field, layer option, Export button)."""
        window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR
        # window_flags |= ui.WINDOW_FLAGS_NO_TITLE_BAR
        self.export3dmwindow = ui.Window("Export Stage As 3DM", width=300, height=130, flags=window_flags)
        with self.export3dmwindow.frame:
            with ui.VStack():
                with ui.HStack():
                    ui.Label("Path", width=50, height = 25)
                    path = ui.StringField( height = 25, tooltip = "Set the location and name of the file i.e c:/temp/myRhinofile.3dm")
                with ui.HStack( height = 35):
                    def exLastGrpAsLayCb_changed(self, val):
                        # Mirror the checkbox into the flag read by SaveAllas3DM.
                        self.excludeLastGroupAsLayer = val
                        print(val)
                    exLastGrpAsLayCb = ui.CheckBox(width = 30)
                    exLastGrpAsLayCb.model.add_value_changed_fn(lambda cb: exLastGrpAsLayCb_changed(self,cb.get_value_as_bool() ) )
                    # NOTE(review): "Exlude" is a typo in this user-facing label ("Exclude").
                    ui.Label("Exlude last group as layer", width=50, height = 15)
                def exportbt():
                    SaveAllas3DM(self,path.model.get_value_as_string())
                ui.Line()
                ui.Button("Export", clicked_fn=lambda: exportbt(), height=25)
class GrasshopperFunctions:
    """Grasshopper definitions evaluated remotely via Rhino.Compute.

    NOTE(review): like RhinoFunctions, these methods receive the MyExtension
    instance as `self` (see extension.py).
    """

    def randomDiamonds(self, uCt, vCt, rrA, rrB):
        """Evaluate randomDiamonds.ghx on the first selected mesh and author the results.

        uCt/vCt are the surface U/V divisions; rrA/rrB the random radius range.
        """
        compute_rhino3d.Util.url = self.computeUrl
        ghFile = os.path.dirname(os.path.dirname(__file__)) + "/rhinocompute/gh/randomDiamonds.ghx"
        selectedMeshes = convertSelectedUsdMeshToRhino()
        # Only the first selected mesh is used as the base mesh.
        inputMesh = selectedMeshes[0]["Mesh"]
        # create list of input trees
        ghMesh = json.dumps(inputMesh.Encode())
        mesh_tree = gh.DataTree("baseMesh")
        mesh_tree.Append([0], [ghMesh])
        srfU_tree = gh.DataTree("srfU")
        srfU_tree.Append([0], [uCt])
        srfV_tree = gh.DataTree("srfV")
        srfV_tree.Append([0], [vCt])
        rrA_tree = gh.DataTree("RR_A")
        rrA_tree.Append([0], [rrA])
        rrB_tree = gh.DataTree("RR_B")
        rrB_tree.Append([0], [rrB])
        inputs = [mesh_tree, srfU_tree, srfV_tree, rrA_tree, rrB_tree]
        results = gh.EvaluateDefinition(ghFile, inputs)
        # decode results
        data = results['values'][0]['InnerTree']['{0}']
        # NOTE(review): this module only does `from rhino3dm import *`, which does not
        # bind the name `rhino3dm` itself — ensure `import rhino3dm` exists or this raises NameError.
        outMeshes = [rhino3dm.CommonObject.Decode(json.loads(item['data'])) for item in data]
        ct = 0
        for m in outMeshes:
            RhinoMeshToUsdMesh("/World", f"/randomDiamonds/randomDiamonds_{ct}", m)
            ct += 1

    def randomDiamonds_UI(self):
        """Show the Random Diamonds window; sliders re-run the script live, plus a Run button."""
        def run(uCt, vCt, rrA, rrB):
            GrasshopperFunctions.randomDiamonds(self, uCt, vCt, rrA, rrB)
        # window_flags = ui.WINDOW_FLAGS_NO_RESIZE
        sliderStyle = {"border_radius":15, "background_color": 0xFFDDDDDD, "secondary_color":0xFFAAAAAA, "color":0xFF111111, "margin":3}
        window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR
        self.theWindow = ui.Window("Random Diamonds", width=300, height=200, flags=window_flags)
        with self.theWindow.frame:
            with ui.VStack():
                with ui.HStack():
                    ui.Label("U Ct", width=40)
                    srfU = ui.IntSlider(height= 20, min=1, max=50, style= sliderStyle )
                with ui.HStack():
                    ui.Label("V Ct", width=40)
                    srfV = ui.IntSlider(height= 20, min=1, max=50, style= sliderStyle )
                with ui.HStack():
                    ui.Label("min D", width=40)
                    rrA = ui.FloatSlider(height= 20, min=0.1, max=150, style= sliderStyle )
                with ui.HStack():
                    ui.Label("max D", width=40)
                    rrB = ui.FloatSlider(height= 20, min=0.1, max=150, style= sliderStyle )
                # Reasonable defaults for first run.
                srfU.model.set_value(4)
                srfV.model.set_value(4)
                rrA.model.set_value(4)
                rrB.model.set_value(75)
                # Any slider change re-evaluates the definition with the current values.
                srfU.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
                srfV.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
                rrA.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
                rrB.model.add_value_changed_fn(lambda m:run(srfU.model.get_value_as_int(),srfV.model.get_value_as_int(),rrA.model.get_value_as_float(),rrB.model.get_value_as_float()))
                ui.Line(height=10)
                ui.Button("Run >>", clicked_fn=lambda: GrasshopperFunctions.randomDiamonds(self,
                    srfU.model.get_value_as_int(),
                    srfV.model.get_value_as_int(),
                    rrA.model.get_value_as_float(),
                    rrB.model.get_value_as_float(),
                    ), height=30)
rcervellione-nv/omni.rhinocompute/exts/cerver.util.rhinocompute/cerver/util/rhinocompute/RhinoComputUtil.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import compute_rhino3d.Util
import compute_rhino3d.Mesh
import compute_rhino3d.Grasshopper as gh
import rhino3dm
import json
import omni.ext
import omni.ui as ui
from pxr import Usd, UsdGeom, Gf
import omni.usd
import asyncio
def convertSelectedUsdMeshToRhino():
    """Convert every selected mesh prim to rhino3dm.

    Returns a list of dicts: {"Name": prim name, "Mesh": rhino3dm.Mesh}.
    """
    context = omni.usd.get_context()
    stage = omni.usd.get_context().get_stage()
    selected_paths = context.get_selection().get_selected_prim_paths()
    # Keep only prims that really are meshes.
    mesh_prims = [
        prim
        for prim in (stage.GetPrimAtPath(path) for path in selected_paths)
        if UsdGeom.Mesh(prim)
    ]
    return [
        {"Name": prim.GetName(), "Mesh": UsdMeshToRhinoMesh(prim)}
        for prim in mesh_prims
    ]
def UsdMeshToRhinoMesh(usdMesh):
    """Convert a USD mesh prim to a rhino3dm.Mesh in world space.

    Points are transformed by the prim's local-to-world matrix so the Rhino
    mesh lands where the USD mesh renders. Triangle and quad faces are
    supported; faces with more vertices are skipped (see TODO).
    """
    # Flat list of face-vertex indices; consumed `fc` entries at a time below.
    faces = []

    # Items needed to deal with world vs. local transforms.
    xform_cache = UsdGeom.XformCache()
    mtrx_world = xform_cache.GetLocalToWorldTransform(usdMesh)

    mesh = rhino3dm.Mesh()

    # Convert the USD points to Rhino vertices in world space.
    points = UsdGeom.Mesh(usdMesh).GetPointsAttr().Get()
    for p in points:
        world_p = mtrx_world.Transform(p)
        mesh.Vertices.Add(world_p[0], world_p[1], world_p[2])

    # Face indices can be extended directly because they are plain ints.
    faces.extend(UsdGeom.Mesh(usdMesh).GetFaceVertexIndicesAttr().Get())
    faceCount = UsdGeom.Mesh(usdMesh).GetFaceVertexCountsAttr().Get()

    # USD stores faces as a flat index list plus a per-face vertex count.
    # BUG FIX: the original compared counts with `is` (identity), which only
    # works by accident via CPython's small-int caching and raises a
    # SyntaxWarning on modern Pythons; use `==` (equality) instead.
    # ToDo: Deal with ngons (faces with more than 4 vertices are skipped).
    ct = 0
    for fc in faceCount:
        if fc == 3:
            mesh.Faces.AddFace(faces[ct], faces[ct + 1], faces[ct + 2])
        elif fc == 4:
            mesh.Faces.AddFace(faces[ct], faces[ct + 1], faces[ct + 2], faces[ct + 3])
        ct += fc

    # Recompute normals on the Rhino side; the USD normals are intentionally ignored.
    mesh.Normals.ComputeNormals()
    mesh.Compact()
    return mesh
def save_stage():
    """Save the current stage's root layer and trigger live-sync processing."""
    stage = omni.usd.get_context().get_stage()
    stage.GetRootLayer().Save()
    # NOTE(review): omni.client is not imported in this module — this relies on it
    # being reachable as an attribute of the `omni` namespace package; confirm.
    omni.client.usd_live_process()
def RhinoMeshToUsdMesh( rootUrl, meshName, rhinoMesh: rhino3dm.Mesh , primPath=None):
    """Author a UsdGeom.Mesh at rootUrl + meshName from a rhino3dm.Mesh.

    Vertices, triangle/quad faces and vertex normals are copied over.
    """
    stage = omni.usd.get_context().get_stage()

    # Define (or redefine) the mesh prim at the target path.
    usd_mesh = UsdGeom.Mesh.Define(stage, rootUrl + meshName)

    # Vertex positions.
    vertex_list = rhinoMesh.Vertices
    usd_mesh.CreatePointsAttr(
        [Gf.Vec3f(vertex_list[i].X, vertex_list[i].Y, vertex_list[i].Z)
         for i in range(len(vertex_list))]
    )

    # Faces: Rhino marks a triangle by repeating the third corner in slot four.
    face_indices = []
    face_vertex_counts = []
    for i in range(rhinoMesh.Faces.Count):
        face = rhinoMesh.Faces[i]
        face_indices.append(face[0])
        face_indices.append(face[1])
        face_indices.append(face[2])
        if face[2] != face[3]:
            # Genuine quad — keep the fourth corner.
            face_indices.append(face[3])
            face_vertex_counts.append(4)
        else:
            face_vertex_counts.append(3)
    usd_mesh.CreateFaceVertexIndicesAttr(face_indices)
    usd_mesh.CreateFaceVertexCountsAttr(face_vertex_counts)

    # Vertex normals.
    usd_mesh.CreateNormalsAttr([Gf.Vec3f(n.X, n.Y, n.Z) for n in rhinoMesh.Normals])
def SaveRhinoFile(rhinoMeshes, path):
    """Write the given rhino3dm meshes into a .3dm file at `path`."""
    model = rhino3dm.File3dm()
    for mesh in rhinoMeshes:
        model.Objects.AddMesh(mesh)
    model.Write(path)
def SaveSelectedAs3dm(self, path):
    """Export the currently selected USD meshes to a .3dm file at `path`."""
    converted = convertSelectedUsdMeshToRhino()
    SaveRhinoFile([entry['Mesh'] for entry in converted], path)
def SaveAllas3DM(self, path):
    """Export every mesh prim on the stage to a .3dm file at `path`.

    The USD prim hierarchy is rebuilt as nested Rhino layers, and prim
    properties (BIM namespace when present, e.g. from the Revit connector)
    are copied onto each object as user strings. `self` is the extension
    instance (provides excludeLastGroupAsLayer and progressbarprog).
    """
    # get the stage
    stage = omni.usd.get_context().get_stage()
    # get all prims that are meshes
    meshPrims = [stage.GetPrimAtPath(prim.GetPath()) for prim in stage.Traverse() if UsdGeom.Mesh(prim)]
    # make a rhino file
    rhinoFile = rhino3dm.File3dm()
    # Maps "a::b::c" nested layer names to their rhino layer index.
    uniqLayers = {}
    # figure out how many elements there are (to implement progress bar in future)
    numPrims = len(meshPrims)
    curPrim = 0
    # loop over all the meshes
    for mp in meshPrims:
        # convert from usd mesh to rhino mesh
        rhinoMesh = UsdMeshToRhinoMesh(mp)
        objName = mp.GetName()
        rhinoAttr = rhino3dm.ObjectAttributes()
        dataOnParent = False
        # get the properties on the prim
        bimProps = None
        parentPrim = mp.GetParent()
        # see if this prim has BIM properties (from revit)
        if parentPrim:
            bimProps = mp.GetPropertiesInNamespace("BIM")
            dataOnParent = False
            # see if this prims parent has BIM properties (from revit)
            if not bimProps:
                bimProps = parentPrim.GetPropertiesInNamespace("BIM")
                dataOnParent = True
        # if no bim properties just add regular ones
        if not bimProps :
            bimProps = mp.GetProperties()
            dataOnParent = False
        for p in bimProps:
            # Copy each property onto the Rhino object; ignore any that fail to read.
            try:
                pName = p.GetBaseName()
                var = p.Get()
                rhinoAttr.SetUserString(pName, str(var))
            except Exception :
                pass
        # get the prims path and use that to create nested layers in rhino
        primpath = str(mp.GetPath())
        sepPrimPath = primpath.split('/')
        sepPrimPath.pop(0)
        sepPrimPath.pop()
        # this will adjust the layer structure if the data is from the revit connector
        # or if you just want to prune the last group in the export dialogue
        if dataOnParent or self.excludeLastGroupAsLayer:
            sepPrimPath.pop()
        nestedLayerName = '::'.join(sepPrimPath)
        ct=0
        curLayer = ""
        # loop over all the prim paths to create the nested layers in rhino
        for pp in sepPrimPath:
            if ct == 0:
                curLayer += pp
            else:
                curLayer += f"::{pp}"
            # check if the layer exists, if not make it
            if not curLayer in uniqLayers :
                layer = rhino3dm.Layer()
                if ct>0:
                    # Parent this layer to the previously-created ancestor layer.
                    prevLayer = curLayer.split('::')
                    prevLayer.pop()
                    prevLayer = '::'.join(prevLayer)
                    layer.ParentLayerId = rhinoFile.Layers.FindIndex(uniqLayers[prevLayer]).Id
                layer.Color = (255,255,255,255)
                layer.Name = pp
                idx = rhinoFile.Layers.Add(layer)
                uniqLayers[curLayer]= int(idx)
            ct+=1
        rhinoAttr.Name = objName
        # print(str(uniqLayers[nestedLayerName]))
        rhinoAttr.LayerIndex = int(str(uniqLayers[nestedLayerName]))
        # add the mesh and its attributes to the rhino file
        rhinoFile.Objects.AddMesh(rhinoMesh, rhinoAttr)
        curPrim += 1
        # Progress in [0, 1] for the (future) progress bar UI.
        self.progressbarprog = curPrim/numPrims
    # save it all
    rhinoFile.Write(path)
    print("completed saving")
| 7,771 | Python | 30.983539 | 104 | 0.625402 |
vinjn/llm-metahuman/audio-client/gen_protoc.py | import os
import subprocess
# Directory containing this script; all paths below are resolved relative to it.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Where the .proto sources live and where the generated Python modules are written.
proto_src_root = os.path.normpath(os.path.join(ROOT_DIR, "proto/"))
proto_dst_root = os.path.normpath(os.path.join(ROOT_DIR, "."))
proto_fpath = os.path.normpath(os.path.join(ROOT_DIR, "proto", "audio2face.proto"))

# Invoke grpcio-tools' protoc wrapper to emit both the message classes
# (--python_out) and the gRPC service stubs (--grpc_python_out).
cmd = [
    "python",
    "-m",
    "grpc_tools.protoc",
    "-I",
    f"{proto_src_root}",
    f"--python_out={proto_dst_root}",
    f"--grpc_python_out={proto_dst_root}",
    f"{proto_fpath}",
]

print(cmd)
subprocess.call(cmd)
| 530 | Python | 22.086956 | 83 | 0.633962 |
vinjn/llm-metahuman/audio-client/llm.py | from openai import OpenAI
from pydub import AudioSegment
import gradio as gr
import requests
import os
from litellm import completion
import time
import threading
import queue
import gradio_client as gc
# XXX: increase requests speed
# https://stackoverflow.com/a/72440253
requests.packages.urllib3.util.connection.HAS_IPV6 = False
# Parsed command-line arguments; populated by the program's entry point before use.
args = None
CWD = os.getcwd()
print("CWD:", CWD)

# Voices offered by the OpenAI TTS endpoint; additional A2F instances cycle
# through these (see A2fInstance.setup).
VOICE_ACTORS = ["nova", "alloy", "echo", "fable", "onyx", "shimmer"]
def timing_decorator(func):
    """Decorator that prints the wall-clock duration of every call to `func`."""
    def wrapper(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        print(f"{func.__name__} cost: {elapsed:.2f} seconds.")
        return result

    return wrapper
class A2fInstance:
    """HTTP client wrapper around one Audio2Face headless REST instance."""

    # Temp audio files queued for deletion once A2F has moved on to new ones.
    files_to_delete = []
    # All live A2fInstance objects, indexed by speaker slot.
    instaces = []

    def __init__(self, index) -> None:
        # Both flags start False; setup() probes the services and updates them.
        self.SERVICE_HEALTHY = False
        self.LIVELINK_SERVICE_HEALTHY = False
        self.index = index

    @timing_decorator
    def post(self, end_point, data=None, verbose=True):
        """POST `data` as JSON to `end_point`; return the decoded reply, an error dict, or None."""
        if not self.SERVICE_HEALTHY:
            return None
        if verbose:
            print(f"++ {end_point}")
        api_url = f"{self.base_url}/{end_point}"
        try:
            response = requests.post(api_url, json=data)
            if response and response.status_code == 200:
                if verbose:
                    print(response.json())
                return response.json()
            else:
                if verbose:
                    print(f"Error: {response.status_code} - {response.text}")
                return {"Error": response.status_code, "Reason": response.text}
        except Exception as e:
            print(e)
            # Any transport failure marks the whole instance unhealthy.
            self.SERVICE_HEALTHY = False
            return None

    @timing_decorator
    def get(self, end_point, data=None, verbose=True):
        """GET `end_point` (JSON body `data`); return the decoded reply, an error dict, or None."""
        if not self.SERVICE_HEALTHY:
            return None
        if verbose:
            print(f"++ {end_point}")
        api_url = f"{self.base_url}/{end_point}"
        try:
            response = requests.get(api_url, json=data)
            if response.status_code == 200:
                if verbose:
                    print(response.json())
                return response.json()
            else:
                if verbose:
                    print(f"Error: {response.status_code} - {response.text}")
                return {"Error": response.status_code, "Reason": response.text}
        except Exception as e:
            print(e)
            self.SERVICE_HEALTHY = False
            return None

    def player_setlooping(self, flag=True):
        """Enable/disable audio looping on the A2F player."""
        self.post(
            "A2F/Player/SetLooping",
            {"a2f_player": args.a2f_player_id, "loop_audio": flag},
        )

    def player_play(self):
        """Start playback on the A2F player."""
        self.post("A2F/Player/Play", {"a2f_player": args.a2f_player_id})

    def player_pause(self):
        """Pause playback on the A2F player."""
        self.post("A2F/Player/Pause", {"a2f_player": args.a2f_player_id})

    def player_setrootpath(self, dir_path):
        """Point the A2F player at the directory containing the audio tracks."""
        self.post(
            "A2F/Player/SetRootPath",
            {"a2f_player": args.a2f_player_id, "dir_path": dir_path},
        )

    def player_settrack(self, file_name):
        """Select the audio track (file name relative to the root path)."""
        self.post(
            "A2F/Player/SetTrack",
            {"a2f_player": args.a2f_player_id, "file_name": file_name},
        )

    def player_gettracks(self):
        """List the tracks known to the A2F player (result only printed by post)."""
        self.post("A2F/Player/GetTracks", {"a2f_player": args.a2f_player_id})

    def player_gettime(self):
        """Return the current playback position, or 0 when unavailable."""
        response = self.post(
            "A2F/Player/GetTime", {"a2f_player": args.a2f_player_id}, False
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return 0

    def player_getrange(self):
        """Return the (start, end) work range of the current track, or (0, 0)."""
        response = self.post(
            "A2F/Player/GetRange", {"a2f_player": args.a2f_player_id}, False
        )
        if response and response["status"] == "OK":
            return response["result"]["work"]
        else:
            return (0, 0)

    def generatekeys(self):
        """Ask Audio2Emotion to generate emotion keys for the current track."""
        self.post("A2F/A2E/GenerateKeys", {"a2f_instance": args.a2f_instance_id})

    def ActivateStreamLivelink(self, flag):
        """Turn the stream LiveLink exporter node on or off."""
        self.post(
            "A2F/Exporter/ActivateStreamLivelink",
            {"node_path": args.a2f_livelink_id, "value": flag},
        )

    def IsStreamLivelinkConnected(self):
        """Return True when the stream LiveLink node reports a connection."""
        response = self.post(
            "A2F/Exporter/IsStreamLivelinkConnected",
            {"node_path": args.a2f_livelink_id},
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return False

    def enable_audio_stream(self, flag):
        """Enable/disable audio streaming over LiveLink."""
        self.post(
            "A2F/Exporter/SetStreamLivelinkSettings",
            {
                "node_path": args.a2f_livelink_id,
                "values": {"enable_audio_stream": flag},
            },
        )

    def set_livelink_ports(
        self,
        livelink_host,
        livelink_subject,
        livelink_port,
        livelink_audio_port,
    ):
        """Configure LiveLink host, subject name, and the animation/audio ports."""
        self.post(
            "A2F/Exporter/SetStreamLivelinkSettings",
            {
                "node_path": args.a2f_livelink_id,
                "values": {
                    "livelink_host": livelink_host,
                    "livelink_subject": livelink_subject,
                    "livelink_port": livelink_port,
                    "audio_port": livelink_audio_port,
                },
            },
        )

    def get_preprocessing(self):
        """Return the A2F pre-processing settings dict (empty on failure)."""
        response = self.post(
            "A2F/PRE/GetSettings",
            {"a2f_instance": args.a2f_instance_id},
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return {}

    def set_preprocessing(self, settings):
        """Apply pre-processing settings (note: mutates `settings` by adding the instance id)."""
        settings["a2f_instance"] = args.a2f_instance_id
        self.post("A2F/PRE/SetSettings", settings)

    def get_postprocessing(self):
        """Return the A2F post-processing settings dict (empty on failure)."""
        response = self.post(
            "A2F/POST/GetSettings",
            {"a2f_instance": args.a2f_instance_id},
        )
        if response and response["status"] == "OK":
            return response["result"]
        else:
            return {}

    def set_postprocessing(self, settings):
        """Apply post-processing settings to the A2F instance."""
        self.post(
            "A2F/POST/SetSettings",
            {"a2f_instance": args.a2f_instance_id, "settings": settings},
        )

    def setup(self):
        """Probe the A2F/LiveLink services and push this instance's full configuration."""
        # Each extra instance listens on a consecutive port after args.a2f_port.
        self.base_url = f"http://{args.a2f_host}:{args.a2f_port+self.index}"
        self.tts_voice = args.tts_voice
        if self.index > 0:
            # TODO: make it elegant — secondary instances cycle through the voice list.
            self.tts_voice = VOICE_ACTORS[self.index % len(VOICE_ACTORS)]

        # always ping SERVICE_HEALTHY again in setup(); post() clears it on failure.
        self.SERVICE_HEALTHY = True

        self.ActivateStreamLivelink(True)
        if not self.SERVICE_HEALTHY:
            return
        self.player_setrootpath(CWD)
        self.player_setlooping(False)

        self.LIVELINK_SERVICE_HEALTHY = self.IsStreamLivelinkConnected()
        if not self.LIVELINK_SERVICE_HEALTHY:
            return
        self.enable_audio_stream(True)

        # Give each instance its own subject and port block (stride of 10).
        self.set_livelink_ports(
            args.livelink_host,
            f"{args.livelink_subject}-{self.index}",
            args.livelink_port + 10 * self.index,
            args.livelink_audio_port + 10 * self.index,
        )

        pre_settings = self.get_preprocessing()
        pre_settings["prediction_delay"] = 0
        pre_settings["blink_interval"] = 1.5
        self.set_preprocessing(pre_settings)

        post_settings = self.get_postprocessing()
        post_settings["skin_strength"] = 1.3
        self.set_postprocessing(post_settings)
# Reset the shared instance registry at import time.
A2fInstance.instaces = []
# OpenAI client used for text-to-speech.
openai_client = OpenAI()
# Gradio client used to chat with a peer llm-metahuman; set via the "peer" command.
gc_client: gc.Client = None
chat_ui: gr.ChatInterface = None
def run_single_pipeline(a2f, answer, a2f_peer=None):
    """TTS `answer`, wait for the peer's current playback to finish, then play it on `a2f`.

    `a2f_peer` is the instance whose playback position gates the start (defaults
    to `a2f` itself). Also rotates the temp-file cleanup list.
    """
    global stop_current_a2f_play
    if not a2f_peer:
        a2f_peer = a2f
    # print(answer)
    mp3_file = text_to_mp3(answer, a2f.tts_voice)
    wav_file = mp3_to_wav(mp3_file)
    duration = a2f_peer.player_getrange()[1]
    position = a2f_peer.player_gettime()
    # Poll once a second until the peer's current track finishes (or we are told to stop).
    while position > 0 and position < duration:
        print(position, duration)
        if stop_current_a2f_play:
            print("stop_current_a2f_play")
            stop_current_a2f_play = False
            return

        time.sleep(1)
        position = a2f_peer.player_gettime()
    print("z")
    time.sleep(1)

    a2f.player_setrootpath(CWD)
    a2f.player_settrack(wav_file)
    # a2f_generatekeys()
    a2f.player_play()

    # Delete files queued on the previous run, then queue this run's files.
    for file in A2fInstance.files_to_delete:
        try:
            os.remove(file)
        except Exception:
            pass
    A2fInstance.files_to_delete.clear()
    A2fInstance.files_to_delete.append(mp3_file)
    A2fInstance.files_to_delete.append(wav_file)
# Index into A2fInstance.instaces of the active speaker; -1 means "no speaker"
# (e.g. stage directions in parentheses). Updated by run_pipeline.
current_speaker = -1
@timing_decorator
def run_pipeline(answer):
    """Route `answer` to the right A2F instance.

    Single-instance mode plays everything on instance 0. In multi-instance
    mode, "A:"/"B:" prefixes pick the speaker (the prefix is stripped) and a
    leading "(" silences playback.
    """
    if args.a2f_instance_count == 1:
        run_single_pipeline(A2fInstance.instaces[0], answer)
        return

    global current_speaker
    if answer.startswith("("):
        # Parenthesized lines (stage directions) are not spoken.
        current_speaker = -1
    elif answer.startswith("A:"):
        current_speaker = 0
        answer = answer[2:]
    elif answer.startswith("B:"):
        current_speaker = 1
        answer = answer[2:]

    if current_speaker < 0 or current_speaker >= args.a2f_instance_count:
        return
    a2f = A2fInstance.instaces[current_speaker]
    if not a2f.SERVICE_HEALTHY:
        return

    run_single_pipeline(a2f, answer)
@timing_decorator
def text_to_mp3(text, voice):
    """Synthesize `text` with the OpenAI TTS API; return the timestamp-named MP3 filename."""
    response = openai_client.audio.speech.create(
        model=args.tts_model,
        voice=voice,
        speed=args.tts_speed,
        input=text,
    )
    # Unique-enough filename in the CWD; queued for deletion by run_single_pipeline.
    timestamp = time.time()
    mp3_filename = f"{timestamp}.mp3"
    response.stream_to_file(mp3_filename)
    return mp3_filename
@timing_decorator
def mp3_to_wav(mp3_filename):
    """Transcode an MP3 into a 22.05 kHz WAV next to it; return the WAV filename."""
    audio = AudioSegment.from_mp3(mp3_filename).set_frame_rate(22050)
    wav_filename = f"{mp3_filename}.wav"
    audio.export(wav_filename, format="wav")
    return wav_filename
@timing_decorator
def get_completion(chat_history):
    """Send `chat_history` (OpenAI message dicts) to the configured LLM via litellm.

    Returns either a full response or a stream iterator, depending on
    args.llm_streaming.
    """
    response = completion(
        model=args.llm_model,
        messages=chat_history,
        api_base=args.llm_url,
        stream=args.llm_streaming,
    )
    print(response)
    return response
# Work queue of sentences for pipeline_worker to speak.
q = queue.Queue()
# When True, the worker drains the queue up to the "cleanup_queue_token" sentinel.
cleanup_queue = False
# Signals run_single_pipeline to abort its wait loop.
stop_current_a2f_play = False
def pipeline_worker():
    """Background loop: pull sentences from `q` and run them through the A2F pipeline.

    When `cleanup_queue` is set, pending items up to the "cleanup_queue_token"
    sentinel are discarded and the current playback is asked to stop.
    """
    while True:
        print("--------------------------")

        global cleanup_queue
        global stop_current_a2f_play
        if cleanup_queue:
            # Drain everything queued before the sentinel.
            while not q.empty():
                item = q.get()
                q.task_done()
                if item == "cleanup_queue_token":
                    break
            cleanup_queue = False
            stop_current_a2f_play = True

        item = q.get()
        if item == "cleanup_queue_token":
            # Sentinel arriving after the drain — nothing to do.
            continue
        print(f"Begin: {item}")
        run_pipeline(item)
        print(f"End: {item}")
        q.task_done()
def talk_to_peer(message):
    """Forward `message` to the peer llm-metahuman's /chat endpoint, if one is configured."""
    if not gc_client:
        return
    result = gc_client.predict(
        message, api_name="/chat"  # str in 'Message' Textbox component
    )
    print(f"from peer: {result}")
    # chat_ui.textbox.submit(None, [result, result])
    # chat_ui.textbox.submit()
def predict(message, history):
    """Gradio ChatInterface callback (generator).

    Special commands ("setup", "ping", "redo", "stop", "peer <port>") control
    the audio2face instances / peer connection. Any other message is sent to
    the LLM; when streaming, replies are split into sentences that are queued
    for the speech pipeline as they arrive.
    """
    print("==========================")
    if message == "setup":
        # NOTE(review): `str` shadows the builtin; harmless here, worth renaming.
        str = ""
        for a2f in A2fInstance.instaces:
            a2f.setup()
            str += f"A2F running: {a2f.SERVICE_HEALTHY}\n"
            str += f"Live Link running: {a2f.LIVELINK_SERVICE_HEALTHY}\n"
        yield str
        return
    if message == "ping":
        for a2f in A2fInstance.instaces:
            a2f.post("")
            a2f.get("")
        yield "A2F ping"
        return
    if message == "redo":
        # Replay the last generated audio on every instance.
        for a2f in A2fInstance.instaces:
            a2f.player_play()
        yield "A2F redo"
        return
    if message == "stop":
        # Flag the worker to flush, and queue the sentinel it drains up to.
        global cleanup_queue
        cleanup_queue = True
        q.put("cleanup_queue_token")
        yield "stopped"
        return
    if message.startswith("peer"):
        items = message.split()
        if len(items) >= 2:
            gradio_port = int(items[1])
            # TODO: support non localhost
            args.gradio_peer_url = f"http://{args.gradio_host}:{gradio_port}/"
            global gc_client
            gc_client = gc.Client(args.gradio_peer_url)
            yield f"I will chat with another llm-metahuman: {args.gradio_peer_url}"
        return
    # Convert gradio's (human, assistant) pair history to OpenAI chat format.
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})
    # start_time = time.time()
    response = get_completion(history_openai_format)
    yield ".."
    # global cleanup_queue
    # cleanup_queue = True
    # q.put("cleanup_queue_token")
    if args.llm_streaming:
        # create variables to collect the stream of chunks
        UNUSED_collected_chunks = []
        collected_messages = []
        complete_sentences = ""
        # iterate through the stream of events
        for chunk in response:
            # chunk_time = (
            #     time.time() - start_time
            # )  # calculate the time delay of the chunk
            UNUSED_collected_chunks.append(chunk)  # save the event response
            chunk_message = chunk.choices[0].delta.content  # extract the message
            if not chunk_message:
                continue
            collected_messages.append(chunk_message)  # save the message
            # print(
            #     f"Message {chunk_time:.2f} s after request: {chunk_message}"
            # )  # print the delay and text
            print(chunk_message)
            # Sentence boundary: ASCII and full-width terminators, or a newline.
            if chunk_message in [
                ".",
                "!",
                "?",
                "。",
                "!",
                "?",
            ] or chunk_message.endswith("\n"):
                # if not chunk_message or "\n" in chunk_message:
                one_sentence = "".join([m for m in collected_messages if m is not None])
                if len(one_sentence) < 10:
                    # ignore short sentences
                    continue
                collected_messages = []
                complete_sentences += one_sentence
                q.put(one_sentence)
                # run_pipeline(one_sentence)
                yield complete_sentences
                talk_to_peer(one_sentence)
        # print the time delay and text received
        # print(f"Full response received {chunk_time:.2f} seconds after request")
        # # clean None in collected_messages
        # collected_messages = [m for m in collected_messages if m is not None]
        # full_reply_content = "".join([m for m in collected_messages])
        # print(f"Full conversation received: {full_reply_content}")
        # yield full_reply_content
    else:
        # Non-streaming: speak the whole reply at once.
        if len(response.choices[0].message.content) == 0:
            return
        answer = response.choices[0].message.content
        yield answer
        run_pipeline(answer)
def main():
    """Parse CLI options, start the worker thread and A2F instances, launch gradio."""
    import argparse
    parser = argparse.ArgumentParser(description="llm.py arguments")
    # gradio settings
    parser.add_argument("--a2f_instance_count", type=int, default=1)
    parser.add_argument("--gradio_host", default="localhost")
    parser.add_argument("--gradio_port", type=int, default=7860)
    parser.add_argument(
        "--gradio_peer_url",
        default=None,
        help="the gradio peer that this gradio instance will chat with. Default value is None, which means chat with a human.",
    )
    # llm / litellm settings
    parser.add_argument("--llm_engine", default="gpt", choices=["gpt", "llama2"])
    parser.add_argument(
        "--llm_model", default=None, help="https://docs.litellm.ai/docs/providers"
    )
    parser.add_argument("--llm_url", default=None)
    parser.add_argument(
        "--llm_streaming", default=True, action=argparse.BooleanOptionalAction
    )
    # audio2face settings
    parser.add_argument("--a2f_host", default="localhost")
    parser.add_argument("--a2f_port", default=8011, type=int)
    parser.add_argument("--a2f_instance_id", default="/World/audio2face/CoreFullface")
    parser.add_argument("--a2f_player_id", default="/World/audio2face/Player")
    parser.add_argument("--a2f_livelink_id", default="/World/audio2face/StreamLivelink")
    # tts settings
    parser.add_argument("--tts_model", default="tts-1", choices=["tts-1", "tts-1-hd"])
    parser.add_argument("--tts_speed", default=1.1, type=float)
    # livelink settings
    parser.add_argument("--livelink_host", default="localhost")
    parser.add_argument("--livelink_port", default=12030, type=int)
    parser.add_argument("--livelink_subject", default="Audio2Face")
    parser.add_argument("--livelink_audio_port", default=12031, type=int)
    parser.add_argument(
        "--tts_voice",
        default="nova",
        choices=VOICE_ACTORS,
        help="https://platform.openai.com/docs/guides/text-to-speech",
    )
    global args
    args = parser.parse_args()
    # Resolve per-engine model/url defaults when not given explicitly.
    if not args.llm_model:
        if args.llm_engine == "gpt":
            args.llm_model = args.llm_model or "gpt-3.5-turbo"
        elif args.llm_engine == "llama2":
            args.llm_model = args.llm_model or "ollama/llama2"
            args.llm_url = args.llm_url or "http://localhost:11434"
    # Daemon thread: exits with the main process once the UI is closed.
    threading.Thread(target=pipeline_worker, daemon=True).start()
    for i in range(args.a2f_instance_count):
        a2f = A2fInstance(i)
        a2f.setup()
        A2fInstance.instaces.append(a2f)
    global chat_ui
    chat_ui = gr.ChatInterface(
        predict,
        title=f"llm-metahuman @{args.gradio_port}",
        examples=["hello", "tell me 3 jokes", "what's the meaning of life?"],
    )
    chat_ui.queue().launch(server_name=args.gradio_host, server_port=args.gradio_port)
    q.join()
if __name__ == "__main__":
    main()
| 18,138 | Python | 28.736066 | 127 | 0.573327 |
vinjn/llm-metahuman/audio-client/ref/pytts-demo.py | import pyttsx3
# pyttsx3 smoke test: inspect/set rate, volume, and voice, speak two lines,
# then save a phrase to an mp3 file.
engine = pyttsx3.init()  # object creation
""" RATE"""
rate = engine.getProperty("rate")  # current speaking rate (words per minute)
print(rate)  # printing current voice rate
engine.setProperty("rate", 125)  # setting up new voice rate
"""VOLUME"""
volume = engine.getProperty(
    "volume"
)  # current volume level (min=0 and max=1)
print(volume)  # printing current volume level
engine.setProperty("volume", 1.0)  # setting up volume level between 0 and 1
"""VOICE"""
voices = engine.getProperty("voices")  # details of the installed voices
print(voices)
engine.setProperty("voice", voices[0].id)  # changing index changes voices; 0 for male
# engine.setProperty('voice', voices[1].id)  # changing index changes voices; 1 for female
engine.say("Hello World!")
engine.say("说什么 current speaking rate is " + str(rate))
engine.runAndWait()
engine.stop()
"""Saving Voice to a file"""
# On linux make sure that 'espeak' and 'ffmpeg' are installed
engine.save_to_file("Hello World", "test.mp3")
engine.runAndWait()
| 1,054 | Python | 30.969696 | 91 | 0.721063 |
vinjn/llm-metahuman/audio-client/ref/minimal-chatbot.py | import random
import gradio as gr
def alternatingly_agree(message, history):
    """Agree when the chat history has even length, disagree otherwise."""
    agree = len(history) % 2 == 0
    return f"Yes, I do think that '{message}'" if agree else "I don't think so"
count = 0  # number of times textbox_update has fired
def textbox_update(chatui_textbox):
    """Every 10th call returns "z"; otherwise echoes the textbox content."""
    global count
    count += 1
    return "z" if count % 10 == 0 else chatui_textbox
if __name__ == "__main__":
    # Poll the textbox once per second via textbox_update (trigger_mode="once"
    # avoids stacking callbacks), then launch the chat UI.
    with gr.ChatInterface(alternatingly_agree) as chat_ui:
        chat_ui.textbox.change(
            textbox_update,
            chat_ui.textbox,
            chat_ui.textbox,
            every=1,
            trigger_mode="once",
        )
        chat_ui.launch()
| 660 | Python | 18.441176 | 58 | 0.554545 |
vinjn/llm-metahuman/audio-client/ref/portal.py | import gradio as gr
def task1(input_text):
    """Echo the input behind a 'Task 1 Result: ' prefix."""
    return f"Task 1 Result: {input_text}"
def task2(input_image):
    """Placeholder task: ignores the image and returns a fixed label."""
    return "Task 2 Result"
def task3(input_image):
    """Placeholder task: ignores the image and returns a fixed label.

    Fixes a copy-paste bug: this previously returned "Task 2 Result".
    """
    return "Task 3 Result"
# interface one
# NOTE(review): iface1 and iface2 share the same title string.
iface1 = gr.Interface(
    fn=task1, inputs="text", outputs="text", title="Multi-Page Interface"
)
# interface two
iface2 = gr.Interface(
    fn=task2, inputs="image", outputs="text", title="Multi-Page Interface"
)
tts_examples = [
    "I love learning machine learning",
    "How do you do?",
]
# Hosted HuggingFace text-to-speech demo, loaded remotely.
tts_demo = gr.load(
    "huggingface/facebook/fastspeech2-en-ljspeech",
    title=None,
    examples=tts_examples,
    description="Give me something to say!",
    cache_examples=False,
)
# Hosted HuggingFace speech-to-text demo with microphone input.
stt_demo = gr.load(
    "huggingface/facebook/wav2vec2-base-960h",
    title=None,
    inputs="mic",
    description="Let me try to guess what you're saying!",
)
# One tab per sub-interface (task3 is defined above but not exposed here).
demo = gr.TabbedInterface(
    [iface1, iface2, tts_demo, stt_demo],
    ["Text-to-text", "image-to-text", "Text-to-speech", "Speech-to-text"],
)
# Run the interface
demo.launch(share=True)
| 1,054 | Python | 18.537037 | 74 | 0.666034 |
vinjn/llm-metahuman/audio-client/ref/sine-curve.py | import math
import gradio as gr
import plotly.express as px
import numpy as np
plot_end = 2 * math.pi  # right edge of the next window to plot
def get_plot(period=1):
    """Return the next 2π-wide window of sin(2π·period·x) as a plotly figure."""
    global plot_end
    window = 2 * math.pi
    xs = np.arange(plot_end - window, plot_end, 0.02)
    fig = px.line(x=xs, y=np.sin(2 * math.pi * period * xs))
    plot_end += window
    if plot_end > 1000:
        # Wrap around so the x-range never grows unbounded.
        plot_end = window
    return fig
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("Change the value of the slider to automatically update the plot")
            period = gr.Slider(label="Period of plot", value=1, minimum=0, maximum=10, step=1)
            plot = gr.Plot(label="Plot (updates every half second)")
    # Redraw once per second; a slider change cancels the running timer
    # (cancels=[dep]) and re-registers get_plot with the new period.
    dep = demo.load(get_plot, None, plot, every=1)
    period.change(get_plot, period, plot, every=1, cancels=[dep])
if __name__ == "__main__":
demo.queue().launch() | 871 | Python | 25.424242 | 94 | 0.6062 |
mnaskret/omni-tetGen/mnresearch/tetgen/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands as commands
import pxr
from pxr import Sdf
import numpy as np
import tetgenExt
import os
import math
import warp as wp
class MyExtension(omni.ext.IExt):
    """Omniverse extension that tetrahedralizes a dropped .obj mesh with tetgen
    and writes the resulting PBD simulation attributes onto a new USD mesh prim.

    Review fixes in this revision:
    - calculateNormals: zero-initialized accumulator, every triangle visited,
      unit face normal accumulated (was adding an uninitialized buffer, the
      scalar norm, and only a third of the faces).
    - on_click: the PLC / Quality checkbox values are now actually passed to
      tetrahedralize (they were hard-coded to True).
    """
    fileUrl = ''  # path of the last file dropped onto the drop area

    def drop_accept(url, ext):
        # Accept drops of the configured file extension only
        print("File dropped")
        return url.endswith(ext)

    def drop(widget, event):
        # Show the dropped path in the label and remember it for later use.
        widget.text = event.mime_data
        MyExtension.fileUrl = event.mime_data

    def drop_area(self, ext):
        """Build a drag-and-drop target that accepts files ending in *ext*."""
        # If drop is acceptable, the rectangle is highlighted
        style = {}
        style["Rectangle"] = {"background_color": 0xFF999999}
        style["Rectangle:drop"] = {"background_color": 0xFF994400}
        stack = ui.ZStack()
        with stack:
            ui.Rectangle(style=style)
            text = ui.Label(f"Accepts {ext}", alignment=ui.Alignment.CENTER, word_wrap=True)
        # NOTE(review): set_accept_drop_fn likely returns None, so this
        # clobbers self.fileUrl; the real path is stored by MyExtension.drop().
        self.fileUrl = stack.set_accept_drop_fn(lambda d, e=ext: MyExtension.drop_accept(d, e))
        stack.set_drop_fn(lambda a, w=text: MyExtension.drop(w, a))

    def createMesh(usd_context, stage, meshName):
        """Reference the dropped asset into the stage and return its mesh prim."""
        commands.execute('CreateReferenceCommand',
                         usd_context=usd_context,
                         path_to='/World/' + meshName,
                         asset_path=MyExtension.fileUrl,
                         instanceable=True)
        prim = stage.GetPrimAtPath('/World/' + meshName + '/' + meshName + '/' + meshName)
        return prim

    def addAttributes(stage, prim, node, elem, face, edge, normals, colors, meshName):
        """Create a new USD mesh from the tetgen output and attach the PBD
        simulation attributes (topology, rest lengths/volumes, inverse masses).

        `face` is expected already raveled to a flat triangle-index array.
        `prim` and `colors` are currently unused but kept for interface
        stability.
        """
        numberOfTris = int(face.shape[0] / 3)
        faceCount = np.full((numberOfTris), 3)  # every face is a triangle
        mesh = pxr.PhysicsSchemaTools.createMesh(stage,
                                                 pxr.Sdf.Path('/World/' + meshName + 'Mesh'),
                                                 node.tolist(),
                                                 normals.tolist(),
                                                 face.tolist(),
                                                 faceCount.tolist())
        newPrim = stage.GetPrimAtPath('/World/' + meshName + 'Mesh')
        velocitiesNP = np.zeros_like(node)
        inverseMasses = np.ones(len(node), dtype=float)
        edgesRestLengths = np.zeros(len(edge), dtype=float)
        tetrahedronsRestVolumes = np.zeros(len(elem), dtype=float)
        for i in range(len(edge)):
            edgesRestLengths[i] = np.linalg.norm(node[edge[i][0]] - node[edge[i][1]])
        for i in range(len(elem)):
            tetrahedronPositionA = node[elem[i][0]]
            tetrahedronPositionB = node[elem[i][1]]
            tetrahedronPositionC = node[elem[i][2]]
            tetrahedronPositionD = node[elem[i][3]]
            p1 = tetrahedronPositionB - tetrahedronPositionA
            p2 = tetrahedronPositionC - tetrahedronPositionA
            p3 = tetrahedronPositionD - tetrahedronPositionA
            # Signed tetrahedron volume: dot(cross(p1, p2), p3) / 6.
            volume = wp.dot(wp.cross(p1, p2), p3) / 6.0
            tetrahedronsRestVolumes[i] = volume
        velocitiesValue = pxr.Vt.Vec3fArray().FromNumpy(velocitiesNP)
        elemValue = pxr.Vt.Vec4iArray().FromNumpy(elem)
        edgeValue = pxr.Vt.Vec2iArray().FromNumpy(edge)
        edgesRestLengthsValue = pxr.Vt.FloatArray().FromNumpy(edgesRestLengths)
        inverseMassesValue = pxr.Vt.FloatArray().FromNumpy(inverseMasses)
        tetrahedronsRestVolumesValue = pxr.Vt.FloatArray().FromNumpy(tetrahedronsRestVolumes)
        elemAtt = newPrim.CreateAttribute('elem', Sdf.ValueTypeNames.Int4Array)
        edgeAtt = newPrim.CreateAttribute('edge', Sdf.ValueTypeNames.Int2Array)
        edgesRestLengthsAtt = newPrim.CreateAttribute('edgesRestLengths', Sdf.ValueTypeNames.FloatArray)
        inverseMassesAtt = newPrim.CreateAttribute('inverseMasses', Sdf.ValueTypeNames.FloatArray)
        tetrahedronsRestVolumesAtt = newPrim.CreateAttribute('tetrahedronsRestVolumes', Sdf.ValueTypeNames.FloatArray)
        velocitiesAtt = newPrim.GetAttribute('velocities')
        velocitiesAtt.Set(velocitiesValue)
        elemAtt.Set(elemValue)
        edgeAtt.Set(edgeValue)
        edgesRestLengthsAtt.Set(edgesRestLengthsValue)
        inverseMassesAtt.Set(inverseMassesValue)
        tetrahedronsRestVolumesAtt.Set(tetrahedronsRestVolumesValue)
        return mesh, newPrim

    def extractMeshDataToNP(prim):
        """Return (points (n,3) float array, faces (m,3) int array) from a mesh prim."""
        points = prim.GetAttribute('points').Get()
        faces = prim.GetAttribute('faceVertexIndices').Get()
        pointsNP = np.array(points, dtype=float)
        facesNP = np.array(faces, dtype=int)
        facesNP = facesNP.reshape((-1, 3))
        return pointsNP, facesNP

    def setPLC(self, value):
        # Checkbox callback: tetgen "piecewise linear complex" option.
        self.PLC = value

    def setQuality(self, value):
        # Checkbox callback: tetgen mesh-quality option.
        self.Quality = value

    def cross(a, b):
        """3-D cross product of two indexable vectors, returned as a list."""
        c = [a[1]*b[2] - a[2]*b[1],
             a[2]*b[0] - a[0]*b[2],
             a[0]*b[1] - a[1]*b[0]]
        return c

    def calculateNormals(node, face):
        """Accumulate per-vertex normals from an (n, 3) triangle-index array.

        Fixes from review:
        - the accumulator starts at zero (np.empty_like left garbage),
        - every triangle is visited (`face.shape[0] / 3` on a 2-D array
          skipped two thirds of them),
        - each vertex receives the unit face normal instead of its scalar
          length.
        The result is the raw accumulated sum; it is not re-normalized here.
        """
        normals = np.zeros_like(node)
        for tri in face:
            pA = node[tri[0]]
            pB = node[tri[1]]
            pC = node[tri[2]]
            faceNormal = np.array(MyExtension.cross(pB - pA, pC - pA), dtype=normals.dtype)
            length = np.linalg.norm(faceNormal)
            if length > 0.0:
                # Skip normalization for degenerate (zero-area) triangles.
                faceNormal /= length
            normals[tri[0]] += faceNormal
            normals[tri[1]] += faceNormal
            normals[tri[2]] += faceNormal
        return normals

    def on_startup(self, ext_id):
        """Build the extension window: drop area, option checkboxes, run button."""
        print("[mnresearch.tetgen] MyExtension startup")
        self._window = ui.Window("Tetrahedralizer", width=300, height=300)
        with self._window.frame:
            self.PLC = False
            self.Quality = False
            with ui.VStack():
                MyExtension.drop_area(self, ".obj")
                with ui.HStack():
                    ui.Label("PLC", height=0)
                    plcCB = ui.CheckBox(width=20)
                    plcCB.model.add_value_changed_fn(
                        lambda a: MyExtension.setPLC(self, a.get_value_as_bool()))
                with ui.HStack():
                    ui.Label("Quality", height=0)
                    qualityCB = ui.CheckBox(width=20)
                    qualityCB.model.add_value_changed_fn(
                        lambda a: MyExtension.setQuality(self, a.get_value_as_bool()))
                def on_click():
                    # Run tetgen on the dropped file and replace the source
                    # prim with a simulation-ready mesh.
                    print("clicked!")
                    self.usd_context = omni.usd.get_context()
                    self.stage = self.usd_context.get_stage()
                    if MyExtension.fileUrl != "":
                        meshName = MyExtension.fileUrl.split(os.sep)[-1][:-4]
                        prim = MyExtension.createMesh(self.usd_context, self.stage, meshName)
                        points, faces = MyExtension.extractMeshDataToNP(prim)
                        tet = tetgenExt.TetGen(points, faces)
                        print('Running tetGen on: ', MyExtension.fileUrl,
                              '\nwith options:',
                              'PLC: ', self.PLC,
                              '\nQuality: ', self.Quality)
                        # Fix: honor the checkbox state (was hard-coded True).
                        node, elem, face, edge = tet.tetrahedralize(quality=self.Quality,
                                                                    plc=self.PLC,
                                                                    facesout=1,
                                                                    edgesout=1)
                        normals = MyExtension.calculateNormals(node, face)
                        colors = np.ones_like(normals)
                        face = face.ravel()
                        mesh, newPrim = MyExtension.addAttributes(self.stage,
                                                                  prim,
                                                                  node,
                                                                  elem,
                                                                  face,
                                                                  edge,
                                                                  normals,
                                                                  colors,
                                                                  meshName)
                        pxr.Usd.Stage.RemovePrim(self.stage, '/World/' + meshName)
                ui.Button("Generate tetrahedral mesh", clicked_fn=lambda: on_click())

    def on_shutdown(self):
        print("[mnresearch.tetgen] MyExtension shutdown")
| 8,644 | Python | 38.474886 | 118 | 0.518047 |
mnaskret/omni-tetGen/mnresearch/tetgen/PBDBasicGravityDatabase.py | """Support for simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity
PBDBasicGravity
"""
import omni.graph.core as og
import traceback
import sys
import numpy
class PBDBasicGravityDatabase(og.Database):
    """Helper class providing simplified access to data on nodes of type mnresearch.tetgen.PBDBasicGravity
    Class Members:
        node: Node being evaluated
    Attribute Value Properties:
        Inputs:
            inputs.edge
            inputs.edgesRestLengths
            inputs.elem
            inputs.gravity
            inputs.ground
            inputs.inverseMasses
            inputs.ks_distance
            inputs.ks_volume
            inputs.num_substeps
            inputs.points
            inputs.sim_constraints
            inputs.tetrahedronsRestVolumes
            inputs.velocities
            inputs.velocity_dampening
        Outputs:
            outputs.points
            outputs.velocities
    """
    # NOTE: OmniGraph-generated database wrapper (see GENERATOR_VERSION below).
    # Prefer regenerating from the node's .ogn description over editing by hand.
    # This is an internal object that provides per-class storage of a per-node data dictionary
    PER_NODE_DATA = {}
    # This is an internal object that describes unchanging attributes in a generic way
    # The values in this list are in no particular order, as a per-attribute tuple
    # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, Is_Required, DefaultValue
    # You should not need to access any of this data directly, use the defined database interfaces
    INTERFACE = og.Database._get_interface([
        ('inputs:edge', 'int2[]', 0, None, 'Input edges', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:edgesRestLengths', 'float[]', 0, None, 'Input edges rest lengths', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:elem', 'int4[]', 0, None, 'Input tetrahedrons', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:gravity', 'vector3f', 0, None, 'Gravity constant', {og.MetadataKeys.DEFAULT: '[0.0, -9.8, 0.0]'}, True, [0.0, -9.8, 0.0]),
        ('inputs:ground', 'float', 0, None, 'Ground level', {og.MetadataKeys.DEFAULT: '-100.0'}, True, -100.0),
        ('inputs:inverseMasses', 'float[]', 0, None, 'Inverse masses', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:ks_distance', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),
        ('inputs:ks_volume', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '1.0'}, True, 1.0),
        ('inputs:num_substeps', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '8'}, True, 8),
        ('inputs:points', 'point3f[]', 0, None, 'Input points', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:sim_constraints', 'int', 0, None, '', {og.MetadataKeys.DEFAULT: '1'}, True, 1),
        ('inputs:tetrahedronsRestVolumes', 'float[]', 0, None, 'Input tetrahedrons rest volumes', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:velocities', 'vector3f[]', 0, None, 'Input velocities', {og.MetadataKeys.DEFAULT: '[]'}, True, []),
        ('inputs:velocity_dampening', 'float', 0, None, '', {og.MetadataKeys.DEFAULT: '0.1'}, True, 0.1),
        ('outputs:points', 'point3f[]', 0, None, 'Output points', {}, True, None),
        ('outputs:velocities', 'vector3f[]', 0, None, 'Output velocities', {}, True, None),
    ])
    @classmethod
    def _populate_role_data(cls):
        """Populate a role structure with the non-default roles on this node type"""
        role_data = super()._populate_role_data()
        role_data.inputs.gravity = og.Database.ROLE_VECTOR
        role_data.inputs.points = og.Database.ROLE_POINT
        role_data.inputs.velocities = og.Database.ROLE_VECTOR
        role_data.outputs.points = og.Database.ROLE_POINT
        role_data.outputs.velocities = og.Database.ROLE_VECTOR
        return role_data
    class ValuesForInputs(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to input attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
        @property
        def edge(self):
            data_view = og.AttributeValueHelper(self._attributes.edge)
            return data_view.get()
        @edge.setter
        def edge(self, value):
            # Inputs are read-only while compute() runs (see abi.compute).
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.edge)
            data_view = og.AttributeValueHelper(self._attributes.edge)
            data_view.set(value)
            self.edge_size = data_view.get_array_size()
        @property
        def edgesRestLengths(self):
            data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)
            return data_view.get()
        @edgesRestLengths.setter
        def edgesRestLengths(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.edgesRestLengths)
            data_view = og.AttributeValueHelper(self._attributes.edgesRestLengths)
            data_view.set(value)
            self.edgesRestLengths_size = data_view.get_array_size()
        @property
        def elem(self):
            data_view = og.AttributeValueHelper(self._attributes.elem)
            return data_view.get()
        @elem.setter
        def elem(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.elem)
            data_view = og.AttributeValueHelper(self._attributes.elem)
            data_view.set(value)
            self.elem_size = data_view.get_array_size()
        @property
        def gravity(self):
            data_view = og.AttributeValueHelper(self._attributes.gravity)
            return data_view.get()
        @gravity.setter
        def gravity(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.gravity)
            data_view = og.AttributeValueHelper(self._attributes.gravity)
            data_view.set(value)
        @property
        def ground(self):
            data_view = og.AttributeValueHelper(self._attributes.ground)
            return data_view.get()
        @ground.setter
        def ground(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.ground)
            data_view = og.AttributeValueHelper(self._attributes.ground)
            data_view.set(value)
        @property
        def inverseMasses(self):
            data_view = og.AttributeValueHelper(self._attributes.inverseMasses)
            return data_view.get()
        @inverseMasses.setter
        def inverseMasses(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.inverseMasses)
            data_view = og.AttributeValueHelper(self._attributes.inverseMasses)
            data_view.set(value)
            self.inverseMasses_size = data_view.get_array_size()
        @property
        def ks_distance(self):
            data_view = og.AttributeValueHelper(self._attributes.ks_distance)
            return data_view.get()
        @ks_distance.setter
        def ks_distance(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.ks_distance)
            data_view = og.AttributeValueHelper(self._attributes.ks_distance)
            data_view.set(value)
        @property
        def ks_volume(self):
            data_view = og.AttributeValueHelper(self._attributes.ks_volume)
            return data_view.get()
        @ks_volume.setter
        def ks_volume(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.ks_volume)
            data_view = og.AttributeValueHelper(self._attributes.ks_volume)
            data_view.set(value)
        @property
        def num_substeps(self):
            data_view = og.AttributeValueHelper(self._attributes.num_substeps)
            return data_view.get()
        @num_substeps.setter
        def num_substeps(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.num_substeps)
            data_view = og.AttributeValueHelper(self._attributes.num_substeps)
            data_view.set(value)
        @property
        def points(self):
            data_view = og.AttributeValueHelper(self._attributes.points)
            return data_view.get()
        @points.setter
        def points(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.points)
            data_view = og.AttributeValueHelper(self._attributes.points)
            data_view.set(value)
            self.points_size = data_view.get_array_size()
        @property
        def sim_constraints(self):
            data_view = og.AttributeValueHelper(self._attributes.sim_constraints)
            return data_view.get()
        @sim_constraints.setter
        def sim_constraints(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.sim_constraints)
            data_view = og.AttributeValueHelper(self._attributes.sim_constraints)
            data_view.set(value)
        @property
        def tetrahedronsRestVolumes(self):
            data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)
            return data_view.get()
        @tetrahedronsRestVolumes.setter
        def tetrahedronsRestVolumes(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.tetrahedronsRestVolumes)
            data_view = og.AttributeValueHelper(self._attributes.tetrahedronsRestVolumes)
            data_view.set(value)
            self.tetrahedronsRestVolumes_size = data_view.get_array_size()
        @property
        def velocities(self):
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            return data_view.get()
        @velocities.setter
        def velocities(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.velocities)
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            data_view.set(value)
            self.velocities_size = data_view.get_array_size()
        @property
        def velocity_dampening(self):
            data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)
            return data_view.get()
        @velocity_dampening.setter
        def velocity_dampening(self, value):
            if self._setting_locked:
                raise og.ReadOnlyError(self._attributes.velocity_dampening)
            data_view = og.AttributeValueHelper(self._attributes.velocity_dampening)
            data_view.set(value)
    class ValuesForOutputs(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to output attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
            # Cached array sizes let get() reserve output storage up front.
            self.points_size = None
            self.velocities_size = None
        @property
        def points(self):
            data_view = og.AttributeValueHelper(self._attributes.points)
            return data_view.get(reserved_element_count = self.points_size)
        @points.setter
        def points(self, value):
            data_view = og.AttributeValueHelper(self._attributes.points)
            data_view.set(value)
            self.points_size = data_view.get_array_size()
        @property
        def velocities(self):
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            return data_view.get(reserved_element_count = self.velocities_size)
        @velocities.setter
        def velocities(self, value):
            data_view = og.AttributeValueHelper(self._attributes.velocities)
            data_view.set(value)
            self.velocities_size = data_view.get_array_size()
    class ValuesForState(og.DynamicAttributeAccess):
        """Helper class that creates natural hierarchical access to state attributes"""
        def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
            """Initialize simplified access for the attribute data"""
            context = node.get_graph().get_default_graph_context()
            super().__init__(context, node, attributes, dynamic_attributes)
    def __init__(self, node):
        super().__init__(node)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
        self.inputs = PBDBasicGravityDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
        self.outputs = PBDBasicGravityDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
        dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
        self.state = PBDBasicGravityDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
    class abi:
        """Class defining the ABI interface for the node type"""
        @staticmethod
        def get_node_type():
            get_node_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'get_node_type', None)
            if callable(get_node_type_function):
                return get_node_type_function()
            return 'mnresearch.tetgen.PBDBasicGravity'
        @staticmethod
        def compute(context, node):
            db = PBDBasicGravityDatabase(node)
            try:
                # Inputs are locked against writes for the duration of compute.
                db.inputs._setting_locked = True
                compute_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'compute', None)
                if callable(compute_function) and compute_function.__code__.co_argcount > 1:
                    return compute_function(context, node)
                return PBDBasicGravityDatabase.NODE_TYPE_CLASS.compute(db)
            except Exception as error:
                stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))
                db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False)
            finally:
                db.inputs._setting_locked = False
            return False
        @staticmethod
        def initialize(context, node):
            PBDBasicGravityDatabase._initialize_per_node_data(node)
            # Set any default values the attributes have specified
            if not node._do_not_use():
                db = PBDBasicGravityDatabase(node)
                db.inputs.edge = []
                db.inputs.edgesRestLengths = []
                db.inputs.elem = []
                db.inputs.gravity = [0.0, -9.8, 0.0]
                db.inputs.ground = -100.0
                db.inputs.inverseMasses = []
                db.inputs.ks_distance = 1.0
                db.inputs.ks_volume = 1.0
                db.inputs.num_substeps = 8
                db.inputs.points = []
                db.inputs.sim_constraints = 1
                db.inputs.tetrahedronsRestVolumes = []
                db.inputs.velocities = []
                db.inputs.velocity_dampening = 0.1
            initialize_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize', None)
            if callable(initialize_function):
                initialize_function(context, node)
        @staticmethod
        def release(node):
            release_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'release', None)
            if callable(release_function):
                release_function(node)
            PBDBasicGravityDatabase._release_per_node_data(node)
        @staticmethod
        def update_node_version(context, node, old_version, new_version):
            update_node_version_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'update_node_version', None)
            if callable(update_node_version_function):
                return update_node_version_function(context, node, old_version, new_version)
            return False
        @staticmethod
        def initialize_type(node_type):
            initialize_type_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'initialize_type', None)
            needs_initializing = True
            if callable(initialize_type_function):
                needs_initializing = initialize_type_function(node_type)
            if needs_initializing:
                node_type.set_metadata(og.MetadataKeys.EXTENSION, "mnresearch.tetgen")
                node_type.set_metadata(og.MetadataKeys.UI_NAME, "PBDBasicGravity")
                node_type.set_metadata(og.MetadataKeys.DESCRIPTION, "PBDBasicGravity")
                node_type.set_metadata(og.MetadataKeys.LANGUAGE, "Python")
                PBDBasicGravityDatabase.INTERFACE.add_to_node_type(node_type)
        @staticmethod
        def on_connection_type_resolve(node):
            on_connection_type_resolve_function = getattr(PBDBasicGravityDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)
            if callable(on_connection_type_resolve_function):
                on_connection_type_resolve_function(node)
    NODE_TYPE_CLASS = None
    GENERATOR_VERSION = (1, 4, 0)
    TARGET_VERSION = (2, 29, 1)
    @staticmethod
    def register(node_type_class):
        PBDBasicGravityDatabase.NODE_TYPE_CLASS = node_type_class
        og.register_node_type(PBDBasicGravityDatabase.abi, 1)
    @staticmethod
    def deregister():
        og.deregister_node_type("mnresearch.tetgen.PBDBasicGravity")
| 17,984 | Python | 46.204724 | 141 | 0.62411 |
mnaskret/omni-tetGen/mnresearch/tetgen/ogn/nodes/PBDBasicGravity.py | """
This is the implementation of the OGN node defined in OgnNewNode.ogn
"""
# Array or tuple values are accessed as numpy arrays so you probably need this import
import math
import numpy as np
import warp as wp
import omni.timeline
from pxr import Usd, UsdGeom, Gf, Sdf
@wp.kernel
def boundsKer(predictedPositions: wp.array(dtype=wp.vec3),
              groundLevel: float):
    # Ground-plane collision: project any predicted position that fell
    # below groundLevel straight up onto y == groundLevel.
    tid = wp.tid()
    x = predictedPositions[tid]
    if(x[1] < groundLevel):
        predictedPositions[tid] = wp.vec3(x[0], groundLevel, x[2])
@wp.kernel
def PBDStepKer(positions: wp.array(dtype=wp.vec3),
               predictedPositions: wp.array(dtype=wp.vec3),
               velocities: wp.array(dtype=wp.vec3),
               dT: float):
    # PBD finalize step: derive velocity from the position change over dT,
    # then commit the predicted position as the new position.
    tid = wp.tid()
    x = positions[tid]
    xPred = predictedPositions[tid]
    v = (xPred - x)*(1.0/dT)
    x = xPred
    positions[tid] = x
    velocities[tid] = v
@wp.kernel
def gravityKer(positions: wp.array(dtype=wp.vec3),
               predictedPositions: wp.array(dtype=wp.vec3),
               velocities: wp.array(dtype=wp.vec3),
               gravityConstant: wp.vec3,
               velocityDampening: float,
               dt: float):
    # Semi-implicit Euler prediction: v += g*dt (scaled), xPred = x + v*dt.
    tid = wp.tid()
    x = positions[tid]
    v = velocities[tid]
    velocityDampening = 1.0 - velocityDampening
    # NOTE(review): the dampening factor scales only the gravity increment,
    # not the accumulated velocity — confirm that is the intended behavior.
    v = v + gravityConstant*dt*velocityDampening
    xPred = x + v*dt
    predictedPositions[tid] = xPred
@wp.kernel
def distanceConstraints(predictedPositions: wp.array(dtype=wp.vec3),
                        dP: wp.array(dtype=wp.vec3),
                        constraintsNumber: wp.array(dtype=int),
                        edgesA: wp.array(dtype=int),
                        edgesB: wp.array(dtype=int),
                        edgesRestLengths: wp.array(dtype=float),
                        inverseMasses: wp.array(dtype=float),
                        kS: float):
    # Jacobi-style distance constraint: accumulate a correction per edge into
    # dP (atomic, averaged later via constraintsNumber) pulling each edge back
    # toward its rest length, scaled by stiffness kS.
    tid = wp.tid()
    edgeIndexA = edgesA[tid]
    edgeIndexB = edgesB[tid]
    edgePositionA = predictedPositions[edgeIndexA]
    edgePositionB = predictedPositions[edgeIndexB]
    edgeRestLength = edgesRestLengths[tid]
    dir = edgePositionA - edgePositionB
    len = wp.length(dir)
    inverseMass = inverseMasses[edgeIndexA] + inverseMasses[edgeIndexB]
    # NOTE(review): divides by zero when the edge collapses (len == 0) or when
    # both endpoints are static (inverseMass == 0) — confirm inputs exclude these.
    edgeDP = (len-edgeRestLength) * wp.normalize(dir) * kS / inverseMass
    wp.atomic_sub(dP, edgeIndexA, edgeDP)
    wp.atomic_add(dP, edgeIndexB, edgeDP)
    wp.atomic_add(constraintsNumber, edgeIndexA, 1)
    wp.atomic_add(constraintsNumber, edgeIndexB, 1)
@wp.kernel
def volumeConstraints(predictedPositions: wp.array(dtype=wp.vec3),
                      dP: wp.array(dtype=wp.vec3),
                      constraintsNumber: wp.array(dtype=int),
                      tetrahedronsA: wp.array(dtype=int),
                      tetrahedronsB: wp.array(dtype=int),
                      tetrahedronsC: wp.array(dtype=int),
                      tetrahedronsD: wp.array(dtype=int),
                      tetrahedronsRestVolumes: wp.array(dtype=float),
                      inverseMasses: wp.array(dtype=float),
                      kS: float):
    # PBD volume constraint: push each tetrahedron's four vertices along the
    # volume gradient (q0..q3) to restore the rest volume, scaled by kS.
    tid = wp.tid()
    tetrahedronIndexA = tetrahedronsA[tid]
    tetrahedronIndexB = tetrahedronsB[tid]
    tetrahedronIndexC = tetrahedronsC[tid]
    tetrahedronIndexD = tetrahedronsD[tid]
    tetrahedronPositionA = predictedPositions[tetrahedronIndexA]
    tetrahedronPositionB = predictedPositions[tetrahedronIndexB]
    tetrahedronPositionC = predictedPositions[tetrahedronIndexC]
    tetrahedronPositionD = predictedPositions[tetrahedronIndexD]
    tetrahedronRestVolume = tetrahedronsRestVolumes[tid]
    p1 = tetrahedronPositionB - tetrahedronPositionA
    p2 = tetrahedronPositionC - tetrahedronPositionA
    p3 = tetrahedronPositionD - tetrahedronPositionA
    # Volume gradients w.r.t. each vertex; q0 balances the other three.
    q2 = wp.cross(p3, p1)
    q1 = wp.cross(p2, p3)
    q3 = wp.cross(p1, p2)
    q0 = - q1 - q2 - q3
    mA = inverseMasses[tetrahedronIndexA]
    mB = inverseMasses[tetrahedronIndexB]
    mC = inverseMasses[tetrahedronIndexC]
    mD = inverseMasses[tetrahedronIndexD]
    # Signed volume: dot(cross(p1, p2), p3) / 6.
    volume = wp.dot(wp.cross(p1, p2), p3) / 6.0
    # NOTE(review): lambd is zero (division by zero below) when all four
    # vertices are static or all gradients vanish — confirm inputs exclude this.
    lambd = mA * wp.dot(q0, q0) + mB * wp.dot(q1, q1) + mC * wp.dot(q2, q2) + mD * wp.dot(q3, q3)
    lambd = kS * (volume - tetrahedronRestVolume) / lambd
    wp.atomic_sub(dP, tetrahedronIndexA, q0 * lambd * mA)
    wp.atomic_sub(dP, tetrahedronIndexB, q1 * lambd * mB)
    wp.atomic_sub(dP, tetrahedronIndexC, q2 * lambd * mC)
    wp.atomic_sub(dP, tetrahedronIndexD, q3 * lambd * mD)
    wp.atomic_add(constraintsNumber, tetrahedronIndexA, 1)
    wp.atomic_add(constraintsNumber, tetrahedronIndexB, 1)
    wp.atomic_add(constraintsNumber, tetrahedronIndexC, 1)
    wp.atomic_add(constraintsNumber, tetrahedronIndexD, 1)
@wp.kernel
def applyConstraints(predictedPositions: wp.array(dtype=wp.vec3),
                     dP: wp.array(dtype=wp.vec3),
                     constraintsNumber: wp.array(dtype=int)):
    """Average the accumulated constraint corrections per vertex, apply them, and reset the accumulators."""
    idx = wp.tid()
    count = constraintsNumber[idx]
    if count > 0:
        n = float(count)
        correction = dP[idx]
        # Divide (not multiply by reciprocal) to keep arithmetic identical.
        averaged = wp.vec3(correction[0]/n, correction[1]/n, correction[2]/n)
        predictedPositions[idx] = predictedPositions[idx] + averaged
        # Clear accumulators for the next solver iteration.
        dP[idx] = wp.vec3(0.0, 0.0, 0.0)
        constraintsNumber[idx] = 0
class PBDBasicGravity:
    """OmniGraph (OGN) node: advances a tetrahedral soft body by one frame of
    position-based dynamics on the GPU via Warp.

    While the timeline plays: integrate gravity, run distance/volume constraint
    projection for several iterations per substep, clamp to the ground plane,
    then derive velocities from the position update. When stopped, inputs are
    passed through to outputs unchanged.
    """
    @staticmethod
    def compute(db) -> bool:
        """Run one simulation tick.

        db is the generated OGN database accessor (inputs/outputs arrays).
        NOTE(review): annotated ``-> bool`` but no value is returned (implicitly
        None) — confirm the OGN runtime accepts this.
        """
        timeline = omni.timeline.get_timeline_interface()
        device = "cuda"
        # # reset on stop
        # if (timeline.is_stopped()):
        #     context.reset()
        # initialization
        if (timeline.is_playing()):
            with wp.ScopedCudaGuard():
                gravity = db.inputs.gravity
                velocity_dampening = db.inputs.velocity_dampening
                ground = db.inputs.ground
                kSDistance = db.inputs.ks_distance
                kSVolume = db.inputs.ks_volume
                # convert node inputs to a GPU array
                positions = wp.array(db.inputs.points, dtype=wp.vec3, device=device)
                predictedPositions = wp.zeros_like(positions)
                velocities = wp.array(db.inputs.velocities, dtype=wp.vec3, device=device)
                inverseMasses = wp.array(db.inputs.inverseMasses, dtype=float, device=device)
                dP = wp.zeros_like(positions)
                constraintsNumber = wp.zeros(len(dP), dtype=int, device=device)
                # Edge list arrives as an (E, 2) array — presumably; split into endpoint columns.
                edgesSplit = np.hsplit(db.inputs.edge, 2)
                edgesA = wp.array(edgesSplit[0], dtype=int, device=device)
                edgesB = wp.array(edgesSplit[1], dtype=int, device=device)
                edgesRestLengths = wp.array(db.inputs.edgesRestLengths, dtype=float, device=device)
                # Tetrahedron list arrives as a (T, 4) array; split into corner columns.
                tetrahedronsSplit = np.hsplit(db.inputs.elem, 4)
                tetrahedronsA = wp.array(tetrahedronsSplit[0], dtype=int, device=device)
                tetrahedronsB = wp.array(tetrahedronsSplit[1], dtype=int, device=device)
                tetrahedronsC = wp.array(tetrahedronsSplit[2], dtype=int, device=device)
                tetrahedronsD = wp.array(tetrahedronsSplit[3], dtype=int, device=device)
                tetrahedronsRestVolumes = wp.array(db.inputs.tetrahedronsRestVolumes, dtype=float, device=device)
                # step simulation
                with wp.ScopedTimer("Simulate", active=False):
                    # simulate
                    sim_substeps = db.inputs.num_substeps
                    sim_constraints = db.inputs.sim_constraints
                    # NOTE(review): assumes a fixed 30 FPS frame; confirm against the stage's actual frame rate.
                    sim_dt = (1.0/30)/sim_substeps
                    for i in range(sim_substeps):
                        # simulate
                        wp.launch(kernel=gravityKer,
                                  dim=len(positions),
                                  inputs=[positions,
                                          predictedPositions,
                                          velocities,
                                          gravity,
                                          velocity_dampening,
                                          sim_dt],
                                  device=device)
                        # Gauss-Seidel-style constraint iterations per substep.
                        for j in range(sim_constraints):
                            wp.launch(
                                kernel=volumeConstraints,
                                dim=len(tetrahedronsA),
                                inputs=[predictedPositions,
                                        dP,
                                        constraintsNumber,
                                        tetrahedronsA,
                                        tetrahedronsB,
                                        tetrahedronsC,
                                        tetrahedronsD,
                                        tetrahedronsRestVolumes,
                                        inverseMasses,
                                        kSVolume],
                                device=device)
                            wp.launch(
                                kernel=distanceConstraints,
                                dim=len(edgesA),
                                inputs=[predictedPositions,
                                        dP,
                                        constraintsNumber,
                                        edgesA,
                                        edgesB,
                                        edgesRestLengths,
                                        inverseMasses,
                                        kSDistance],
                                device=device)
                            # Apply (and reset) the averaged corrections accumulated above.
                            wp.launch(
                                kernel=applyConstraints,
                                dim=len(positions),
                                inputs=[predictedPositions,
                                        dP,
                                        constraintsNumber],
                                device=device)
                        wp.launch(kernel=boundsKer,
                                  dim=len(predictedPositions),
                                  inputs=[predictedPositions,
                                          ground],
                                  device=device)
                        wp.launch(kernel=PBDStepKer,
                                  dim=len(positions),
                                  inputs=[positions,
                                          predictedPositions,
                                          velocities,
                                          sim_dt],
                                  device=device)
                # write node outputs
                db.outputs.points = positions.numpy()
                db.outputs.velocities = velocities.numpy()
        else:
            with wp.ScopedTimer("Write", active=False):
                # timeline not playing and sim. not yet initialized, just pass through outputs
                db.outputs.points = db.inputs.points
db.outputs.velocities = db.inputs.velocities | 11,017 | Python | 36.349152 | 113 | 0.51829 |
mnaskret/omni-tetGen/mnresearch/tetgen/ogn/tests/TestPBDBasicGravity.py | import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
import os
import carb
class TestOgn(ogts.test_case_class(use_schema_prims=True, allow_implicit_graph=False)):
    """Generated-style tests for the mnresearch.tetgen.PBDBasicGravity OGN node."""

    async def test_import(self):
        """Smoke test: the generated database module imports and exposes its class."""
        import mnresearch.tetgen.ogn.PBDBasicGravityDatabase
        self.assertTrue(hasattr(mnresearch.tetgen.ogn.PBDBasicGravityDatabase, "PBDBasicGravityDatabase"))

    async def test_usda(self):
        """Load the node's template USD file and verify every input's default value.

        The original hand-unrolled 14 identical check blocks; they are folded
        into a single data-driven loop with identical assertions and messages.
        """
        test_file_name = "PBDBasicGravityTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_mnresearch_tetgen_PBDBasicGravity")
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)
        # (attribute name, expected default) pairs — same values the template declares.
        expected_defaults = [
            ("inputs:edge", []),
            ("inputs:edgesRestLengths", []),
            ("inputs:elem", []),
            ("inputs:gravity", [0.0, -9.8, 0.0]),
            ("inputs:ground", -100.0),
            ("inputs:inverseMasses", []),
            ("inputs:ks_distance", 1.0),
            ("inputs:ks_volume", 1.0),
            ("inputs:num_substeps", 8),
            ("inputs:points", []),
            ("inputs:sim_constraints", 1),
            ("inputs:tetrahedronsRestVolumes", []),
            ("inputs:velocities", []),
            ("inputs:velocity_dampening", 0.1),
        ]
        for attr_name, expected in expected_defaults:
            self.assertTrue(test_node.get_attribute_exists(attr_name))
            input_attr = test_node.get_attribute(attr_name)
            actual_input = og.Controller.get(input_attr)
            ogts.verify_values(
                expected, actual_input,
                f"mnresearch.tetgen.PBDBasicGravity USD load test - {attr_name} attribute value error")
| 5,839 | Python | 60.473684 | 150 | 0.71365 |
mnaskret/omni-tetGen/mnresearch/tetgen/nodes/__init__.py |
"""
Dynamically import every file in a directory tree that looks like a Python Ogn Node.
This includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.
"""
import omni.graph.core as og
og.register_ogn_nodes(__file__, "mnresearch.tetgen")
| 290 | Python | 35.374996 | 113 | 0.768966 |
Kim2091/RTXRemixTools/LightAdjuster/LightAdjuster.py | import argparse
def adjust_value(line, value_name, percentage, log_changes, i):
    """Scale a ``float <value_name> = <number>`` line by ``percentage``.

    Returns a ``(line, changed)`` tuple: the (possibly rewritten) line and a
    flag indicating whether a substitution happened. When ``log_changes`` is
    true, the change is printed and appended to changes.log.

    ``i`` is the zero-based line index, reported as 1-based in the log.
    """
    if f'float {value_name} =' in line:
        # Split only on the FIRST '=' so a line containing a second '=' (e.g.
        # in a trailing comment) no longer crashes float() — the original
        # split('=') produced extra fragments in that case.
        parts = line.split('=', 1)
        old_value = float(parts[1].strip())
        new_value = old_value * percentage
        new_line = f'{parts[0]}= {new_value}\n'
        if log_changes:
            log_line = f'Line {i + 1}: {line.strip()} -> {new_line.strip()}'
            print(log_line)
            with open('changes.log', 'a') as log:
                log.write(log_line + '\n')
        return new_line, True
    return line, False
def adjust_file(file_path, start_line=1, log_changes=False, adjust_intensity=False, adjust_color_temperature=False, percentage=None):
    """Rewrite ``file_path`` in place, scaling intensity and/or colorTemperature
    values by ``percentage`` on every line at or after ``start_line`` (1-based).
    Prints a summary of how many lines changed."""
    with open(file_path, 'r') as src:
        source_lines = src.readlines()

    changed_count = 0
    with open(file_path, 'w') as dst:
        for index, text in enumerate(source_lines):
            if index + 1 >= start_line:
                # Intensity first, then color temperature — same order as before.
                targets = []
                if adjust_intensity:
                    targets.append('intensity')
                if adjust_color_temperature:
                    targets.append('colorTemperature')
                for value_name in targets:
                    text, was_changed = adjust_value(text, value_name, percentage, log_changes, index)
                    if was_changed:
                        changed_count += 1
            dst.write(text)
    print(f'Completed! {changed_count} lines changed.')
if __name__ == '__main__':
    # Command-line entry point: scale intensity/colorTemperature values in a file in place.
    parser = argparse.ArgumentParser(description='Adjust the intensity and/or color temperature values in a file.')
    parser.add_argument('file_path', type=str, help='The path to the file to modify.')
    parser.add_argument('-s', '--start-line', type=int, default=1, help='The line number to start modifying at.')
    parser.add_argument('-l', '--log', action='store_true', help='Whether to print a log of the changed lines.')
    parser.add_argument('-ai', '--adjust-intensity', action='store_true', help='Whether to adjust the intensity value.')
    parser.add_argument('-act', '--adjust-color-temperature', action='store_true', help='Whether to adjust the color temperature value.')
    parser.add_argument('-p', '--percentage', type=float, required=True, help='The percentage to adjust the value by.')
    args = parser.parse_args()
    adjust_file(args.file_path, args.start_line, args.log, args.adjust_intensity, args.adjust_color_temperature, args.percentage)
| 2,440 | Python | 52.065216 | 137 | 0.609836 |
Kim2091/RTXRemixTools/MagicUSDA/MagicUSDA.py | import os
import argparse
import xxhash
from pxr import Usd, UsdGeom, UsdShade, Sdf
suffixes = ["_normal", "_emissive", "_metallic", "_rough"]
def generate_hashes(file_path) -> str:
    """Return the upper-case xxh3_64 hex digest of a DDS file's top-level mip data.

    The mip size is derived from the DDS header (width, height, pixel-format
    flags); the hash covers only the bytes of the first mip, starting right
    after the 128-byte header.
    """
    # Read the header and extract the raw data. Thanks @BlueAmulet!
    # Single open: after reading the 128-byte header, the handle is already
    # positioned at the mip data — the original reopened the file and seeked.
    with open(file_path, "rb") as file:
        data = file.read(128)
        dwHeight = int.from_bytes(data[12:16], "little")
        dwWidth = int.from_bytes(data[16:20], "little")
        pfFlags = int.from_bytes(data[80:84], "little")
        pfFourCC = data[84:88]
        bitCount = int.from_bytes(data[88:92], "little")

        mipsize = dwWidth * dwHeight
        if pfFlags & 0x4:  # DDPF_FOURCC
            if pfFourCC == b"DXT1":  # DXT1 is 4bpp
                mipsize //= 2
        elif pfFlags & 0x20242:  # DDPF_ALPHA | DDPF_RGB | DDPF_YUV | DDPF_LUMINANCE
            mipsize = mipsize * bitCount // 8

        data = file.read(mipsize)
    hash_value = xxhash.xxh3_64(data).hexdigest()
    return hash_value.upper()
def write_usda_file(args, file_list, suffix=None) -> [list, list]:
    """Write a .usda material-replacement layer for the textures in file_list.

    When `suffix` is given (e.g. "_normal"), only that texture type is written
    and the file is named `<output><suffix>.usda`; otherwise one layer covering
    every texture type is produced.

    NOTE(review): the early return below yields [created_files, modified_files]
    while the final return is [modified_files, created_files]. Both lists are
    still empty at the early return, so callers are unaffected, but the order is
    inconsistent with the end of the function.
    """
    created_files = []
    modified_files = []
    game_ready_assets_path = os.path.join(args.directory)
    # Check if there are any texture files with the specified suffix
    if suffix:
        has_suffix_files = False
        for file_name in file_list:
            if file_name.endswith(f"{suffix}.dds"):
                has_suffix_files = True
                break
        if not has_suffix_files:
            # return a blank set
            return [created_files, modified_files]
    usda_file_name = f'{args.output}{suffix if suffix else ""}.usda'
    usda_file_path = os.path.join(game_ready_assets_path, usda_file_name)
    if os.path.exists(usda_file_path):
        modified_files.append(usda_file_path)
    else:
        created_files.append(usda_file_path)
    # Map: material key -> (relative diffuse texture path, Remix material hash).
    targets = {}
    reference_directory = args.reference_directory if args.reference_directory else args.directory
    for file_name in file_list:
        if file_name.endswith(".dds"):
            # Extract only the file name from the absolute path
            name = os.path.basename(file_name)
            name, ext = os.path.splitext(name)
            if "_" not in name or name.endswith("_diffuse") or name.endswith("_albedo"):
                # Check if the generate_hashes argument is specified
                if args.generate_hashes:
                    key = name.split("_")[0]  # Use the prefix of the diffuse file name as the key
                    hash_value = generate_hashes(os.path.join(reference_directory, file_name))  # Generate hash for the diffuse file
                else:
                    key = os.path.basename(name)
                    hash_value = key  # Use the original name as the hash value
                # Check if the key contains a hash or ends with _diffuse or _albedo
                if not (key.isupper() and len(key) == 16) and not (key.endswith("_diffuse") or key.endswith("_albedo")):
                    continue
                # Remove the _diffuse or _albedo suffix from the key and hash_value
                key = key.replace("_diffuse", "").replace("_albedo", "")
                hash_value = hash_value.replace("_diffuse", "").replace("_albedo", "")
                # Get the relative path from the game ready assets path to the texture file
                rel_file_path = os.path.relpath(file_name, args.directory)
                targets[key] = (rel_file_path, hash_value)
    # Create a new stage
    stage = Usd.Stage.CreateNew(usda_file_path)
    # Modify the existing RootNode prim
    root_node_prim = stage.OverridePrim("/RootNode")
    # Add a Looks scope as a child of the RootNode prim
    looks_scope = UsdGeom.Scope.Define(stage, "/RootNode/Looks")
    added_targets = set()
    for value, (rel_file_path, hash_value) in targets.items():
        # Check if there is a corresponding texture file for the specified suffix
        if suffix and not any(
            file_name.endswith(f"{value}{suffix}.dds") for file_name in file_list
        ): continue
        if value in added_targets:
            continue
        else:
            added_targets.add(value)
        print(f"Adding texture {rel_file_path} with hash: {hash_value}")
        # Add a material prim as a child of the Looks scope
        material_prim = UsdShade.Material.Define(
            stage, f"/RootNode/Looks/mat_{hash_value.upper()}"
        )
        material_prim.GetPrim().GetReferences().SetReferences([])
        # Set the shader attributes
        shader_prim = UsdShade.Shader.Define(
            stage, f"/RootNode/Looks/mat_{hash_value.upper()}/Shader"
        )
        shader_prim.GetPrim().CreateAttribute("info:mdl:sourceAsset", Sdf.ValueTypeNames.Asset).Set(
            f"{args.shader_type}.mdl"
        )
        shader_prim.GetPrim().CreateAttribute("info:implementationSource", Sdf.ValueTypeNames.Token).Set(
            "sourceAsset"
        )
        shader_prim.GetPrim().CreateAttribute("info:mdl:sourceAsset:subIdentifier", Sdf.ValueTypeNames.Token).Set(
            f"{args.shader_type}"
        )
        shader_output = shader_prim.CreateOutput("output", Sdf.ValueTypeNames.Token)
        if not suffix or suffix == "_diffuse" or suffix == "_albedo":
            diffuse_texture = shader_prim.CreateInput(
                "diffuse_texture", Sdf.ValueTypeNames.Asset
            )
            # Use the dynamically generated relative path for the diffuse texture
            diffuse_texture.Set(f".\{rel_file_path}")
        # Process each type of texture
        # NOTE(review): `file_name` below is left over from the earlier loop over
        # file_list (it holds the LAST file visited), so os.path.dirname(file_name)
        # may point at the wrong folder when textures live in different
        # subdirectories — confirm intended.
        if not suffix or suffix == "_emissive":
            emissive_file_name = f"{value}_emissive.dds"
            # print(f"Emissive File Name: {emissive_file_name in file_list}")
            # print(file_list)
            if any(file_path.endswith(emissive_file_name) for file_path in file_list):
                emissive_mask_texture = shader_prim.CreateInput(
                    "emissive_mask_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the emissive texture
                emissive_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), emissive_file_name), args.directory)
                emissive_mask_texture.Set(f".\{emissive_rel_file_path}")
                enable_emission = shader_prim.CreateInput(
                    "enable_emission", Sdf.ValueTypeNames.Bool
                )
                enable_emission.Set(True)
                emissive_intensity = shader_prim.CreateInput(
                    "emissive_intensity", Sdf.ValueTypeNames.Float
                )
                emissive_intensity.Set(5)
        if not suffix or suffix == "_metallic":
            metallic_file_name = f"{value}_metallic.dds"
            if any(file_path.endswith(metallic_file_name) for file_path in file_list):
                metallic_texture = shader_prim.CreateInput(
                    "metallic_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the metallic texture
                metallic_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), metallic_file_name), args.directory)
                metallic_texture.Set(f".\{metallic_rel_file_path}")
        if not suffix or suffix == "_normal":
            normal_file_name = f"{value}_normal.dds"
            if any(file_path.endswith(normal_file_name) for file_path in file_list):
                normalmap_texture = shader_prim.CreateInput(
                    "normal_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the normal texture
                normal_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), normal_file_name), args.directory)
                normalmap_texture.Set(f".\{normal_rel_file_path}")
        if not suffix or suffix == "_rough":
            roughness_file_name = f"{value}_rough.dds"
            if any(file_path.endswith(roughness_file_name) for file_path in file_list):
                reflectionroughness_texture = shader_prim.CreateInput(
                    "reflectionroughness_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the roughness texture
                roughness_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), roughness_file_name), args.directory)
                reflectionroughness_texture.Set(f".\{roughness_rel_file_path}")
        # Connect shader output to material inputs
        material_prim.CreateInput(
            "mdl:displacement", Sdf.ValueTypeNames.Token
        ).ConnectToSource(shader_output)
        material_prim.CreateInput(
            "mdl:surface", Sdf.ValueTypeNames.Token
        ).ConnectToSource(shader_output)
        material_prim.CreateInput(
            "mdl:volume", Sdf.ValueTypeNames.Token
        ).ConnectToSource(shader_output)
    # Save the stage
    stage.Save()
    return [modified_files, created_files]
def add_sublayers(args, file_list) -> list:
    """Append the per-suffix USDA layers produced with -m as sublayers of mod.usda.

    Only layers that exist in file_list and are not already referenced are
    appended; existing sublayers are never removed. Returns the list of files
    modified (mod.usda when present, otherwise empty).
    """
    modified_files = []
    game_ready_assets_path = os.path.join(args.directory)
    mod_file_path = os.path.join(game_ready_assets_path, "mod.usda")
    if os.path.exists(mod_file_path):
        modified_files.append(mod_file_path)
        # Open the existing stage
        stage = Usd.Stage.Open(mod_file_path)
        # Get the existing sublayers
        existing_sublayers = list(stage.GetRootLayer().subLayerPaths)
        # Create a set of existing sublayer file names
        existing_sublayer_files = {
            os.path.basename(sublayer_path) for sublayer_path in existing_sublayers
        }
        # Add new sublayers
        new_sublayers = [
            f"./{args.output}{suffix}.usda"
            for suffix in suffixes
            if f"{args.output}{suffix}.usda" not in existing_sublayer_files
            and any(
                os.path.basename(file_path) == f"{args.output}{suffix}.usda"
                for file_path in file_list
            )
        ]
        stage.GetRootLayer().subLayerPaths = (existing_sublayers + new_sublayers)
        # Save the stage
        stage.Save()
    return modified_files
if __name__ == "__main__":
# ARGUMENT BLOCK
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", required=True, help="Path to directory")
parser.add_argument("-o", "--output", default="mod", help="Output file name")
parser.add_argument("-g", "--generate-hashes", action="store_true", help="Generates hashes for file names before the suffix")
parser.add_argument("-m", "--multiple-files", action="store_true", help="Save multiple .usda files, one for each suffix type (except for diffuse)")
parser.add_argument("-a", "--add-sublayers", action="store_true", help="Add sublayers made with -m to the mod.usda file. This argument only modifies the mod.usda file and does not affect any custom USDA file specified by the -o argument.")
parser.add_argument("-s", "--shader-type", default="AperturePBR_Opacity", choices=["AperturePBR_Opacity", "AperturePBR_Translucent"], help="Shader type")
parser.add_argument("-r", "--reference-directory", help="Path to reference directory for diffuse texture hashes")
args = parser.parse_args()
# Check target processing directory before use
if not os.path.isdir(args.directory):
raise FileNotFoundError("Specified processing directory (-d) is invalid")
# Recursively scan folders
file_list = []
for root, dirs, files in os.walk(args.directory):
for file in files:
file_list.append(os.path.join(root, file))
created_files = []
modified_files = []
# Process sublayer additions
print(f"Add Sublayers: {args.add_sublayers}")
if args.add_sublayers:
modified_files.extend(add_sublayers(args, file_list))
# Generate unique USDA files per suffix type (except diffuse)
if args.multiple_files:
for suffix in suffixes:
m, c = write_usda_file(args, file_list, suffix)
modified_files.extend(m), created_files.extend(c)
else: # Generate a single USDA file for all suffixes
m, c = write_usda_file(args, file_list)
modified_files.extend(m), created_files.extend(c)
# Complete
print("Finished!")
print("Created files:")
for file in created_files:
print(f" - {file}")
print("Modified files:")
for file in modified_files:
print(f" - {file}")
| 12,785 | Python | 43.242214 | 243 | 0.615722 |
Kim2091/RTXRemixTools/RemixMeshConvert/RemixMeshConvert.py | import argparse
import logging
import os
import shutil
import sys
from pxr import Usd, UsdGeom, Gf, Sdf
# Remaps DCC-specific UV primvar names to the canonical st/st1 names
# (presumably the names the RTX Remix runtime expects — confirm).
ALIASES = {
    "primvars:UVMap": ("primvars:st", Sdf.ValueTypeNames.Float2Array),
    "primvars:UVChannel_1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    "primvars:map1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    # Add more aliases here
}
def convert_face_varying_to_vertex_interpolation(usd_file_path):
    """Open usd_file_path and rewrite every mesh so all primvars use vertex
    interpolation instead of faceVarying.

    Points are de-indexed (duplicated per face-vertex) so that per-face-vertex
    data can be stored per-vertex; faceVertexIndices becomes the identity.
    Aliased UV primvars (see ALIASES) are renamed. Returns the opened stage;
    the caller is responsible for calling stage.Save().
    """
    stage = Usd.Stage.Open(usd_file_path)
    mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]
    for prim in mesh_prims:
        mesh = UsdGeom.Mesh(prim)
        indices = prim.GetAttribute("faceVertexIndices")
        points = prim.GetAttribute("points")
        if not indices or not points:
            continue  # Skip if the required attributes are missing
        # De-index: one point per face-vertex, then identity indices.
        points_arr = points.Get()
        modified_points = [points_arr[i] for i in indices.Get()]
        points.Set(modified_points)
        indices.Set([i for i in range(len(indices.Get()))])
        mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
        primvar_api = UsdGeom.PrimvarsAPI(prim)
        for var in primvar_api.GetPrimvars():
            if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:
                var.SetInterpolation(UsdGeom.Tokens.vertex)
                # Replace aliases with "float2[] primvars:st"
                if var.GetName() in ALIASES:
                    new_name, new_type_name = ALIASES[var.GetName()]
                    new_var = primvar_api.GetPrimvar(new_name)
                    if new_var:
                        new_var.Set(var.Get())
                    else:
                        new_var = primvar_api.CreatePrimvar(new_name, new_type_name)
                        new_var.Set(var.Get())
                    new_var.SetInterpolation(UsdGeom.Tokens.vertex)  # Set interpolation to vertex
                    primvar_api.RemovePrimvar(var.GetBaseName())
    return stage
def process_folder(input_folder, output_folder, output_extension=None):
    """Convert every file directly inside input_folder, writing results into
    output_folder (optionally renamed to output_extension)."""
    for entry in os.listdir(input_folder):
        source_path = os.path.join(input_folder, entry)
        if output_extension:
            entry = os.path.splitext(entry)[0] + '.' + output_extension
        destination_path = os.path.join(output_folder, entry)
        if not os.path.isfile(source_path):
            continue
        # Copy first, then convert the copy in place.
        shutil.copy(source_path, destination_path)
        stage = convert_face_varying_to_vertex_interpolation(destination_path)
        stage.Save()
        logging.info(f"Processed file: {source_path} -> {destination_path}")
def main():
    """CLI entry point: convert a USD file or a folder of USD files, rewriting
    faceVarying primvars to vertex interpolation (optionally changing the
    output extension to usd/usda)."""
    parser = argparse.ArgumentParser(description='Convert USD file formats and interpolation of meshes.')
    parser.add_argument('input', type=str, help='Input file or folder path')
    parser.add_argument('output', type=str, help='Output file or folder path')
    parser.add_argument('-f', '--format', type=str, choices=['usd', 'usda'], help='Output file format (usd or usda)')
    args = parser.parse_args()
    input_path = args.input
    output_path = args.output
    output_extension = args.format
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    if os.path.isdir(input_path):
        process_folder(input_path, output_path, output_extension)
    else:
        if output_extension:
            output_path = os.path.splitext(output_path)[0] + '.' + output_extension
        shutil.copy(input_path, output_path)  # Make a copy of the input file and rename it to the output file
        stage = convert_face_varying_to_vertex_interpolation(output_path)
        stage.Save()  # Modify the output file in place
        logging.info(f"Processed file: {input_path} -> {output_path}")
# Script entry point.
if __name__ == '__main__':
    main()
| 3,853 | Python | 37.929293 | 117 | 0.637944 |
Kim2091/RTXRemixTools/RemixMeshConvert/For USD Composer/RemixMeshConvert_OV.py | from pxr import Usd, UsdGeom, Sdf
# Remaps DCC-specific UV primvar names to the canonical st/st1 names
# (presumably the names the RTX Remix runtime expects — confirm).
ALIASES = {
    "primvars:UVMap": ("primvars:st", Sdf.ValueTypeNames.Float2Array),
    "primvars:UVChannel_1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    "primvars:map1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    # Add more aliases here
}
def convert_face_varying_to_vertex_interpolation(stage):
    """USD Composer variant: rewrite every mesh on an already-open stage so all
    primvars use vertex interpolation instead of faceVarying.

    Unlike the standalone script, this operates on a live stage and blocks the
    old primvar attribute instead of removing it. Returns the stage.
    """
    mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]
    for prim in mesh_prims:
        mesh = UsdGeom.Mesh(prim)
        indices = prim.GetAttribute("faceVertexIndices")
        points = prim.GetAttribute("points")
        if not indices or not points:
            continue  # Skip if the required attributes are missing
        # De-index: one point per face-vertex, then identity indices.
        points_arr = points.Get()
        modified_points = [points_arr[i] for i in indices.Get()]
        points.Set(modified_points)
        indices.Set([i for i in range(len(indices.Get()))])
        mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
        primvar_api = UsdGeom.PrimvarsAPI(prim)
        for var in primvar_api.GetPrimvars():
            if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:
                var.SetInterpolation(UsdGeom.Tokens.vertex)
                # Replace aliases with "float2[] primvars:st"
                if var.GetName() in ALIASES:
                    new_name, new_type_name = ALIASES[var.GetName()]
                    new_var = primvar_api.GetPrimvar(new_name)
                    if new_var:
                        new_var.Set(var.Get())
                    else:
                        new_var = primvar_api.CreatePrimvar(new_name, new_type_name)
                        new_var.Set(var.Get())
                    new_var.SetInterpolation(UsdGeom.Tokens.vertex)  # Set interpolation to vertex
                    # Remove the old primvar directly from the UsdGeomPrimvar object
                    var.GetAttr().Block()
    return stage
# Script entry (intended for the USD Composer / Omniverse script editor):
# convert the currently open stage in place.
import omni.usd  # was never imported in this file; the name `omni` would be a NameError unless the editor pre-binds it

stage = omni.usd.get_context().get_stage()
convert_face_varying_to_vertex_interpolation(stage)
| 1,995 | Python | 38.137254 | 97 | 0.614035 |
gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/objects_position_normal_90.py | import omni.replicator.core as rep
with rep.new_layer():
    # Synthetic-data scene: drops randomized cutlery on a table, randomizes
    # lighting, and renders 50 frames from two cameras via Omniverse Replicator.
    # Load in asset
    local_path = "/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/"
    TABLE_USD = f"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd"
    SPOON_SMALL_USD = f"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd"
    SPOON_BIG_USD = f"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd"
    FORK_SMALL_USD = f"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd"
    FORK_BIG_USD = f"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd"
    KNIFE_USD = f"{local_path}/asset/Collected_Knife/Knife.usd"
    # Camera parameters
    cam_position = (46, 200, 25)
    cam_position2 = (46, 120, 25)
    # NOTE(review): cam_position_random, focus_distance_random and f_stop2 are
    # defined but never used below.
    cam_position_random = rep.distribution.uniform((0, 181, 0), (0, 300, 0))
    cam_rotation = (-90, 0, 0)
    focus_distance = 114
    focus_distance2 = 39.1
    focal_length = 27
    focal_length2 = 18.5
    f_stop = 1.8
    f_stop2 = 1.8
    focus_distance_random = rep.distribution.normal(500.0, 100)
    # Cutlery path
    current_cultery = SPOON_SMALL_USD  # Change the item here e.g KNIFE_USD
    # Output folder name derived from the chosen asset's file stem.
    output_path = current_cultery.split(".")[0].split("/")[-1]
    def rect_lights(num=1):
        # Randomized rectangular area light(s) above the table surface.
        lights = rep.create.light(
            light_type="rect",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 5000),
            position=(45, 110, 0),
            rotation=(-90, 0, 0),
            scale=rep.distribution.uniform(50, 100),
            count=num
        )
        return lights.node
    def dome_lights(num=3):
        # Randomized dome (environment) light(s).
        lights = rep.create.light(
            light_type="dome",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 1000),
            position=(45, 120, 18),
            rotation=(225, 0, 0),
            count=num
        )
        return lights.node
    def table():
        # Static table asset positioned under the cameras.
        table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])
        with table:
            rep.modify.pose(
                position=(46, -0.0, 20),
                rotation=(0, -90, -90),
            )
        return table
    # Define randomizer function for cutlery assets. This randomization includes placement and rotation of the assets on the surface.
    def cutlery_props(size=15):
        instances = rep.randomizer.instantiate(rep.utils.get_usd_files(
            current_cultery), size=size, mode='point_instance')
        with instances:
            rep.modify.pose(
                # Fixed Y (table height), random X/Z placement and yaw.
                position=rep.distribution.uniform(
                    (0, 76.3651, 0), (90, 76.3651, 42)),
                rotation=rep.distribution.uniform(
                    (-90, -180, 0), (-90, 180, 0)),
            )
        return instances.node
    # Register randomization
    rep.randomizer.register(table)
    rep.randomizer.register(cutlery_props)
    rep.randomizer.register(rect_lights)
    rep.randomizer.register(dome_lights)
    # Multiple setup cameras and attach it to render products
    camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length,
                               position=cam_position, rotation=cam_rotation, f_stop=f_stop)
    camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2,
                                position=cam_position2, rotation=cam_rotation, f_stop=f_stop)
    # Will render 1024x1024 images and 512x512 images
    render_product = rep.create.render_product(camera, (1024, 1024))
    render_product2 = rep.create.render_product(camera2, (512, 512))
    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir=f"{local_path}/data/normal/{output_path}",
                      rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)
    writer.attach([render_product, render_product2])
    # Re-randomize scene contents on every generated frame.
    with rep.trigger.on_frame(num_frames=50):
        rep.randomizer.table()
        rep.randomizer.rect_lights(1)
        rep.randomizer.dome_lights(1)
        rep.randomizer.cutlery_props(5)
    # Run the simulation graph
    rep.orchestrator.run()
| 4,170 | Python | 37.62037 | 133 | 0.621823 |
gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/old_setting/objects_position_normal_90.py | import omni.replicator.core as rep
with rep.new_layer():
    # Load in assets: absolute paths to the table and cutlery USD files.
    # NOTE(review): local_path is machine-specific; update it for your checkout.
    local_path = "/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/"
    TABLE_USD =f"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd"
    SPOON_SMALL_USD = f"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd"
    SPOON_BIG_USD = f"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd"
    FORK_SMALL_USD = f"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd"
    FORK_BIG_USD = f"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd"
    KNIFE_USD = f"{local_path}/asset/Collected_Knife/Knife.usd"
    # Camera parameters (positions in stage units, rotations in degrees).
    cam_position = (-131,200,-134)
    cam_position2 = (-131,120,-134)
    # Random camera-position distribution (not used below -- kept for experimentation).
    cam_position_random = rep.distribution.uniform((0,181,0), (0, 300, 0))
    cam_rotation = (-90,0,0) # alternative angled view: (-45,0,0)
    focus_distance = 120
    focus_distance2 = 72
    focal_length = 19.1
    focal_length2 = 7.5
    f_stop = 1.8
    f_stop2 = 1.8
    # Random focus-distance distribution (not used below -- kept for experimentation).
    focus_distance_random = rep.distribution.normal(500.0, 100)
    # Cutlery selection: which USD gets instanced and rendered.
    current_cultery = SPOON_SMALL_USD # Change the item here e.g KNIFE_USD
    # Output folder name derived from the USD file name (e.g. "Spoon_Small").
    output_path = current_cultery.split(".")[0].split("/")[-1]
    def rect_lights(num=2):
        """Create `num` rect lights above the table; returns the replicator node."""
        lights = rep.create.light(
            light_type="rect",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 5000),
            position=(-131,150,-134),
            rotation=(-90,0,0),
            scale=rep.distribution.uniform(50, 100),
            count=num
        )
        return lights.node
    def dome_lights(num=1):
        """Create `num` dome lights; returns the replicator node."""
        lights = rep.create.light(
            light_type="dome",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 1000),
            position=(0,0,0),
            rotation=(270,0,0),
            count=num
        )
        return lights.node
    def table():
        """Spawn the table asset, posed in place, tagged with semantic class 'table'."""
        table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])
        with table:
            rep.modify.pose(
                position=(-135.39745, 0, -140.25696),
                rotation=(0,-90,-90),
            )
        return table
    # Randomizer for the cutlery assets: random placement and rotation on the table surface.
    def cutlery_props(size=15):
        """Point-instance `size` copies of the selected cutlery USD over the table top."""
        instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')
        with instances:
            rep.modify.pose(
                position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),
                rotation=rep.distribution.uniform((-90,-180, 0), (-90, 180, 0)),
            )
        return instances.node
# Register randomization
rep.randomizer.register(table)
rep.randomizer.register(cutlery_props)
rep.randomizer.register(rect_lights)
rep.randomizer.register(dome_lights)
# Multiple setup cameras and attach it to render products
camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)
camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)
# Will render 1024x1024 images and 512x512 images
render_product = rep.create.render_product(camera, (1024, 1024))
render_product2 = rep.create.render_product(camera2, (512, 512))
# Initialize and attach writer
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir=f"{local_path}/data/normal/{output_path}", rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)
writer.attach([render_product, render_product2])
with rep.trigger.on_frame(num_frames=50):
rep.randomizer.table()
rep.randomizer.rect_lights(1)
rep.randomizer.dome_lights(1)
rep.randomizer.cutlery_props(15)
# Run the simulation graph
rep.orchestrator.run()
| 4,065 | Python | 38.096153 | 153 | 0.64182 |
mati-nvidia/window-menu-add/exts/maticodes.example.window.add/maticodes/example/window/add/extension.py | import carb
import omni.ext
import omni.kit.ui
from .window import MyCustomWindow, WINDOW_TITLE
class WindowMenuAddExtension(omni.ext.IExt):
    """Extension that adds a toggleable entry for a custom window under the Window menu."""

    def on_startup(self, ext_id):
        carb.log_info("[maticodes.example.window.add] WindowMenuAddExtension startup")
        # The "Window/" prefix routes the new item into the editor's Window menu.
        self._menu_path = f"Window/{WINDOW_TITLE}"
        self._window = None
        editor_menu = omni.kit.ui.get_editor_menu()
        self._menu = editor_menu.add_item(self._menu_path, self._on_menu_click, True)

    def on_shutdown(self):
        carb.log_info("[maticodes.example.window.add] WindowMenuAddExtension shutdown")
        omni.kit.ui.get_editor_menu().remove_item(self._menu)
        if self._window is None:
            return
        self._window.destroy()
        self._window = None

    def _on_menu_click(self, menu, toggled):
        """Handles showing and hiding the window from the 'Windows' menu."""
        if toggled:
            # Create lazily on first toggle; re-show afterwards.
            if self._window is None:
                self._window = MyCustomWindow(WINDOW_TITLE, self._menu_path)
            else:
                self._window.show()
        elif self._window is not None:
            self._window.hide()
| 1,232 | Python | 34.22857 | 103 | 0.621753 |
mati-nvidia/window-menu-add/exts/maticodes.example.window.add/maticodes/example/window/add/window.py | import omni.kit.ui
import omni.ui as ui
WINDOW_TITLE = "My Custom Window"
class MyCustomWindow(ui.Window):
    """Example window that keeps its Window-menu checkbox in sync with its visibility."""

    def __init__(self, title, menu_path):
        super().__init__(title, width=640, height=480)
        self._menu_path = menu_path
        # Keep the menu item's checked state in sync when the user closes the window.
        self.set_visibility_changed_fn(self._on_visibility_changed)
        self._build_ui()

    def on_shutdown(self):
        # NOTE(review): `self._win` is never assigned anywhere else in this class;
        # this looks like dead code left over from a refactor -- confirm and remove.
        self._win = None

    def show(self):
        """Make the window visible and give it focus."""
        self.visible = True
        self.focus()

    def hide(self):
        """Hide the window without destroying it."""
        self.visible = False

    def _build_ui(self):
        # Minimal placeholder content.
        with self.frame:
            with ui.VStack():
                ui.Label("This is just an empty window", width=0, alignment=ui.Alignment.CENTER)

    def _on_visibility_changed(self, visible):
        # Reflect the window's visibility in the Window-menu checkbox.
        omni.kit.ui.get_editor_menu().set_value(self._menu_path, visible)
| 809 | Python | 25.129031 | 96 | 0.606922 |
mati-nvidia/developer-office-hours/tools/scripts/csv2md.py | # SPDX-License-Identifier: Apache-2.0
import argparse
import csv
from pathlib import Path
def convert(csvfile: Path) -> Path:
    """Convert a Dev Office Hours CSV into a Markdown link list.

    Rows are expected to have at least six columns. A row is emitted as a
    numbered Markdown link when column 2 (URL) is non-empty and column 5 is
    empty; column 1 supplies the link text.

    Returns the path of the ``.md`` file written next to the CSV.
    """
    mdfile = csvfile.with_suffix(".md")
    # newline="" is the csv-module convention for readers; explicit utf-8 keeps
    # the conversion platform-independent.
    with open(csvfile, newline="", encoding="utf-8") as f, open(mdfile, "w", encoding="utf-8") as out:
        for row in csv.reader(f):
            if row[2] and not row[5]:
                out.write(f"1. [{row[1]}]({row[2]})\n")
    return mdfile


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert DOH CSV to MD")
    parser.add_argument(
        "csvfile", help="The CSV file to convert"
    )
    args = parser.parse_args()
    # The unused `rows` accumulator from the original was removed.
    convert(Path(args.csvfile))
    print("Success!")
| 654 | Python | 20.833333 | 73 | 0.54893 |
mati-nvidia/developer-office-hours/tools/scripts/make_ext.py | # SPDX-License-Identifier: Apache-2.0
import argparse
import shutil
import os
from pathlib import Path
SOURCE_PATH = Path(__file__).parent / "template" / "maticodes.doh_YYYY_MM_DD"
def text_replace(filepath, tokens_map):
    """Replace every token from `tokens_map` in the file at `filepath`, in place.

    Args:
        filepath: Path to a text file (str or Path).
        tokens_map: Mapping of token -> replacement text.
    """
    # Read/write with an explicit encoding so template conversion does not
    # depend on the platform's locale default.
    path = Path(filepath)
    data = path.read_text(encoding="utf-8")
    for token, replacement in tokens_map.items():
        data = data.replace(token, replacement)
    path.write_text(data, encoding="utf-8")
if __name__ == "__main__":
    # BUGFIX: the description was copy-pasted from an unrelated tool ("Create
    # folder link to Kit App..."); this script scaffolds a new extension.
    parser = argparse.ArgumentParser(description="Create a new Dev Office Hours extension from the template")
    parser.add_argument(
        "date", help="The dates of the Office Hour in YYYY-MM-DD format."
    )
    args = parser.parse_args()
    year, month, day = args.date.split("-")
    # Copy the template extension into the repo's exts folder.
    # parent.parent.parent (repo root) replaces the unresolved `/"../.."` form.
    dest_path = Path(__file__).parent.parent.parent / f"exts/maticodes.doh_{year}_{month}_{day}"
    shutil.copytree(SOURCE_PATH, dest_path)
    # Rename the python package folder to match the date.
    template_ext_folder = dest_path / "maticodes" / "doh_YYYY_MM_DD"
    ext_folder = dest_path / "maticodes" / f"doh_{year}_{month}_{day}"
    os.rename(template_ext_folder, ext_folder)
    # Token -> replacement map shared by every templated file.
    tokens_map = {
        "[DATE_HYPHEN]": f"{year}-{month}-{day}",
        "[DATE_UNDERSCORE]": f"{year}_{month}_{day}",
        "[DATE_PRETTY]": f"{month}/{day}/{year}",
    }
    # Fill in the date tokens in each templated file.
    ext_toml = dest_path / "config" / "extension.toml"
    text_replace(ext_toml, tokens_map)
    readme = dest_path / "docs" / "README.md"
    text_replace(readme, tokens_map)
    ext_py = ext_folder / "extension.py"
    text_replace(ext_py, tokens_map)
    print("Success!")
| 1,695 | Python | 28.241379 | 115 | 0.614159 |
mati-nvidia/developer-office-hours/tools/scripts/template/maticodes.doh_YYYY_MM_DD/maticodes/doh_YYYY_MM_DD/extension.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.ext
import omni.ui as ui
class MyWindow(ui.Window):
    """Template example window: a label plus a button that logs when clicked."""

    def __init__(self, title: str = None, **kwargs):
        super().__init__(title, **kwargs)
        # Defer UI construction until the frame is ready to build.
        self.frame.set_build_fn(self._build_window)

    def _build_window(self):
        with ui.ScrollingFrame():
            with ui.VStack(height=0):
                ui.Label("My Label")

                def _on_click():
                    carb.log_info("Button Clicked!")

                ui.Button("Click Me", clicked_fn=_on_click)
class MyExtension(omni.ext.IExt):
    """Template Dev Office Hours extension: opens MyWindow on startup."""

    def on_startup(self, ext_id):
        carb.log_info("[maticodes.doh_[DATE_UNDERSCORE]] Dev Office Hours Extension ([DATE_HYPHEN]) startup")
        self._window = MyWindow("MyWindow", width=300, height=300)

    def on_shutdown(self):
        carb.log_info("[maticodes.doh_[DATE_UNDERSCORE]] Dev Office Hours Extension ([DATE_HYPHEN]) shutdown")
        if not self._window:
            return
        self._window.destroy()
        self._window = None
| 1,025 | Python | 29.17647 | 110 | 0.599024 |
mati-nvidia/developer-office-hours/tools/scripts/template/maticodes.doh_YYYY_MM_DD/scripts/my_script.py | # SPDX-License-Identifier: Apache-2.0 | 37 | Python | 36.999963 | 37 | 0.783784 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_06/scripts/query_script_components.py | # SPDX-License-Identifier: Apache-2.0
from pxr import Sdf
import omni.usd

# Print the script paths attached to a prim's Python Scripting Component.
# NOTE(review): Sdf appears unused in this snippet -- confirm before removing.
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("omni:scripting:scripts")
# The attribute only exists once the scripting API has been applied to the prim.
if attr.IsValid():
    scripts = attr.Get()
    # Get() returns None when no scripts have been assigned yet.
    if scripts:
        for script in scripts:
print(script) | 327 | Python | 26.333331 | 50 | 0.685015 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_06/scripts/add_script_component.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
import omni.usd

# Apply the Python Scripting Component to the prim and refresh the property window.
omni.kit.commands.execute('ApplyScriptingAPICommand',
    paths=[Sdf.Path('/World/Cube')])
omni.kit.commands.execute('RefreshScriptingPropertyWindowCommand')

# Approach 1: overwrite the scripts attribute directly with a single local path.
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("omni:scripting:scripts")
scripts = attr.Get()
attr.Set([r"C:\Users\mcodesal\Downloads\new_script2.py"])
# An omniverse:// URL works as well:
# attr.Set(["omniverse://localhost/Users/mcodesal/new_script2.py"])

# Approach 2: set the same property through the undoable ChangeProperty command.
omni.kit.commands.execute('ChangeProperty',
    prop_path=Sdf.Path('/World/Cube.omni:scripting:scripts'),
    value=Sdf.AssetPathArray(1, (Sdf.AssetPath('C:\\mcodesal\\Downloads\\new_script2.py'))),
    prev=None)

# Approach 3: append to the existing list instead of replacing it.
attr = prim.GetAttribute("omni:scripting:scripts")
scripts = list(attr.Get())
scripts.append(r"C:\mcodesal\Downloads\new_script.py")
attr.Set(scripts)
| 899 | Python | 31.142856 | 89 | 0.757508 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_06/scripts/interact_script_components.py | # SPDX-License-Identifier: Apache-2.0
import carb.events
import omni.kit.app
MY_CUSTOM_EVENT = carb.events.type_from_string("omni.my.extension.MY_CUSTOM_EVENT")
bus = omni.kit.app.get_app().get_message_bus_event_stream()
bus.push(MY_CUSTOM_EVENT, payload={"prim_path": "/World/Cube", "x": 1}) | 295 | Python | 31.888885 | 83 | 0.728814 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_21/scripts/pass_arg_to_callback.py | # SPDX-License-Identifier: Apache-2.0
from functools import partial
import omni.ui as ui
def do_something(p1, p2):
print(f"Hello {p1} {p2}")
window = ui.Window("My Window", width=300, height=300)
with window.frame:
ui.Button("Click Me", clicked_fn=partial(do_something, "a", "b")) | 290 | Python | 25.454543 | 69 | 0.696552 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_21/scripts/create_mdl_mtl.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
omni.kit.commands.execute('CreateAndBindMdlMaterialFromLibrary',
mdl_name='OmniPBR.mdl',
mtl_name='OmniPBR',
mtl_created_list=None) | 198 | Python | 27.428568 | 64 | 0.787879 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_21/scripts/lock_prim.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
omni.kit.commands.execute('LockSpecs',
spec_paths=['/Desk'],
hierarchy=False) | 164 | Python | 19.624998 | 38 | 0.756098 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_14/scripts/toggle_fullscreen.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("omni.kit.ui.editor_menu_bridge", "action_editor_menu_bridge_window_fullscreen_mode")
action.execute() | 269 | Python | 37.571423 | 121 | 0.784387 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_14/scripts/move_prim_forward.py | # SPDX-License-Identifier: Apache-2.0
from pxr import Gf, UsdGeom, Usd
import omni.usd
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(prim)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
# Apply the local matrix to the start and end points of the camera's default forward vector (-Z)
a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
b: Gf.Vec4d = Gf.Vec4d(0,0,-1,1) * local_transformation
# Get the vector between those two points to get the camera's current forward vector
cam_fwd_vec = b-a
# Convert to Vec3 and then normalize to get unit vector
cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
# Multiply the forward direction vector with how far forward you want to move
forward_step = cam_fwd_unit_vec * 100
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
prim.GetAttribute("xformOp:translate").Set(translate) | 1,220 | Python | 44.222221 | 96 | 0.769672 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_14/scripts/execute_action.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("ext_id", "action_id")
action.execute() | 206 | Python | 28.571424 | 61 | 0.762136 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_13/scripts/add_script_component.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
import omni.usd
# Create the Python Scripting Component property
omni.kit.commands.execute('ApplyScriptingAPICommand',
paths=[Sdf.Path('/World/Cube')])
omni.kit.commands.execute('RefreshScriptingPropertyWindowCommand')
# Add your script to the property
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("omni:scripting:scripts")
scripts = attr.Get()
# Property with no script paths returns None
if scripts is None:
scripts = []
else:
# Property with scripts paths returns VtArray.
# Convert to list to make it easier to work with.
scripts = list(scripts)
scripts.append(r"C:\Users\mcodesal\Downloads\new_script.py")
attr.Set(scripts)
| 785 | Python | 29.230768 | 66 | 0.769427 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_06_23/scripts/select_prims.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
stage = omni.usd.get_context().get_stage()
ctx = omni.usd.get_context()
selection: omni.usd.Selection = ctx.get_selection()
selection.set_selected_prim_paths(["/World/Cube", "/World/Sphere"], False)
import omni.kit.commands
import omni.usd
ctx = omni.usd.get_context()
selection: omni.usd.Selection = ctx.get_selection()
omni.kit.commands.execute('SelectPrimsCommand',
old_selected_paths=selection.get_selected_prim_paths(),
new_selected_paths=["/World/Cone"],
expand_in_stage=True)
| 547 | Python | 23.90909 | 74 | 0.744058 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_27/scripts/sub_child_changes.py | # SPDX-License-Identifier: Apache-2.0
# UsdWatcher
from pxr import Sdf, Tf, Usd
import omni.usd
stage = omni.usd.get_context().get_stage()
def changed_paths(notice, stage):
print("Change fired")
for p in notice.GetChangedInfoOnlyPaths():
if str(p).startswith("/World/Parent" + "/"):
print("Something happened to a descendent of /World/Parent")
print(p)
for p in notice.GetResyncedPaths():
if str(p).startswith("/World/Parent" + "/"):
print("A descendent of /World/Parent was added or removed")
print(p)
objects_changed = Tf.Notice.Register(Usd.Notice.ObjectsChanged, changed_paths, None)
objects_changed.Revoke()
| 696 | Python | 26.879999 | 84 | 0.656609 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/maticodes/doh_2022_07_22/extension.py | # SPDX-License-Identifier: Apache-2.0
import omni.ext
import omni.ui as ui
from omni.kit.widget.searchable_combobox import build_searchable_combo_widget
class MyWindow(ui.Window):
    """Example window demonstrating value models, subscriptions, rebuild, and a
    searchable combo box."""

    def __init__(self, title: str = None, delegate=None, **kwargs):
        super().__init__(title, **kwargs)
        # Defer UI construction until the frame is ready to build.
        self.frame.set_build_fn(self._build_window)

    def _build_window(self):
        with ui.ScrollingFrame():
            with ui.VStack(height=0):
                ui.Label("My Label")
                # Float model shared by the slider and the buttons below; keep the
                # subscription alive on self so callbacks keep firing.
                self.proj_scale_model = ui.SimpleFloatModel()
                self.proj_scale_model_sub = (
                    self.proj_scale_model.subscribe_value_changed_fn(
                        self.slider_value_changed
                    )
                )
                ui.FloatSlider(model=self.proj_scale_model, min=0, max=100)

                def do_rebuild():
                    self.frame.rebuild()

                ui.Button("Rebuild", clicked_fn=do_rebuild)

                def clicked():
                    # Example showing how to retrieve the value from the model.
                    print(
                        f"Button Clicked! Slider Value: {self.proj_scale_model.as_float}"
                    )
                    self.proj_scale_model.set_value(1.0)

                ui.Button("Set Slider", clicked_fn=clicked)

                def on_combo_click_fn(model):
                    component = model.get_value_as_string()
                    print(f"{component} selected")

                component_list = ["Synthetic Data", "USD", "Kit", "UX", "UX / UI"]
                # -1 means no initial selection; default_value still shows "Kit".
                component_index = -1
                self._component_combo = build_searchable_combo_widget(
                    component_list,
                    component_index,
                    on_combo_click_fn,
                    widget_height=18,
                    default_value="Kit",
                )

    def slider_value_changed(self, model):
        # Example showing how to get the value when it changes.
        print("Slider Value:", model.as_float)

    def destroy(self) -> None:
        # Drop the subscription before tearing down the window.
        del self.proj_scale_model_sub
        return super().destroy()
class MyExtension(omni.ext.IExt):
    """Entry point for the 2022-07-22 Dev Office Hours example extension."""

    def on_startup(self, ext_id):
        # ext_id is this extension's id; usable with the extension manager to
        # query additional information, like its location on the filesystem.
        print("[maticodes.doh_2022_07_22] Dev Office Hours Extension (2022-07-22) startup")
        self._window = MyWindow("MyWindow", width=300, height=300)

    def on_shutdown(self):
        print("[maticodes.doh_2022_07_22] Dev Office Hours Extension (2022-07-22) shutdown")
        if not self._window:
            return
        self._window.destroy()
        self._window = None
| 2,804 | Python | 34.961538 | 119 | 0.542083 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/maticodes/doh_2022_07_22/__init__.py | # SPDX-License-Identifier: Apache-2.0
from .extension import *
| 63 | Python | 20.333327 | 37 | 0.761905 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/cmds_more_params.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Gf, Usd
omni.kit.commands.execute('SetAnimCurveKey',
paths=['/World/toy_drummer.xformOp:translate'],
value=Gf.Vec3d(0.0, 0.0, 18))
omni.kit.commands.execute('SetAnimCurveKey',
paths=['/World/toy_drummer.xformOp:translate'],
value=Gf.Vec3d(0.0, 0.0, 24),
time=Usd.TimeCode(72))
| 365 | Python | 23.399998 | 48 | 0.731507 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/set_current_time.py | # SPDX-License-Identifier: Apache-2.0
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.timeline/docs/index.html
import omni.timeline
timeline = omni.timeline.get_timeline_interface()
# set in using seconds
timeline.set_current_time(1)
# set using frame number
fps = timeline.get_time_codes_per_seconds()
timeline.set_current_time(48 / fps)
| 360 | Python | 26.769229 | 90 | 0.775 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/reference_usdz.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
import omni.usd
omni.kit.commands.execute('CreateReference',
path_to=Sdf.Path('/World/toy_drummer2'),
asset_path='C:/Users/mcodesal/Downloads/toy_drummer.usdz',
usd_context=omni.usd.get_context())
| 287 | Python | 21.153845 | 59 | 0.759582 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/run_action_graph.py | # SPDX-License-Identifier: Apache-2.0
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.graph/docs/index.html
import omni.graph.core as og
keys = og.Controller.Keys
og.Controller.edit("/World/ActionGraph", { keys.SET_VALUES: ("/World/ActionGraph/on_impulse_event.state:enableImpulse", True) })
| 313 | Python | 33.888885 | 128 | 0.763578 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_14/maticodes/doh_2022_10_14/extension.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.ext
import omni.ui as ui
class MyWindow(ui.Window):
    """Minimal example window containing a single button."""

    def __init__(self, title: str = None, **kwargs):
        super().__init__(title, **kwargs)
        # Build lazily when the frame is ready.
        self.frame.set_build_fn(self._build_window)

    def _build_window(self):
        def _say_hello():
            print("Hello")

        ui.Button("Click Me", clicked_fn=_say_hello)
class MyExtension(omni.ext.IExt):
    """Entry point for the 2022-10-14 Dev Office Hours example extension."""

    def on_startup(self, ext_id):
        carb.log_info("[maticodes.doh_2022_10_14] Dev Office Hours Extension (2022-10-14) startup")
        self._window = MyWindow("MyWindow", width=300, height=300)

    def on_shutdown(self):
        carb.log_info("[maticodes.doh_2022_10_14] Dev Office Hours Extension (2022-10-14) shutdown")
        if not self._window:
            return
        self._window.destroy()
        self._window = None
| 848 | Python | 28.275861 | 100 | 0.621462 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_14/scripts/context_menu_inject.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.context_menu
def show_menu(objects):
print("show it?")
return True
def hello_world(objects):
print(f"Hello Objects: {objects}")
menu_item_config = {
"name": "Hello World!",
"glyph": "menu_search.svg",
"show_fn": [show_menu],
"onclick_fn": hello_world,
}
# You must keep a reference to the menu item. Set this variable to None to remove the item from the menu
hello_world_menu_item = omni.kit.context_menu.add_menu(menu_item_config, "MENU", "omni.kit.window.viewport")
hello_world_menu_item = None | 583 | Python | 26.809523 | 108 | 0.687822 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_08_25/scripts/add_sbsar.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
omni.kit.commands.execute('AddSbsarReferenceAndBindCommand', sbsar_path=r"C:\Users\mcodesal\Downloads\blueberry_skin.sbsar",
target_prim_path="/World/Sphere")
| 252 | Python | 35.142852 | 124 | 0.710317 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_08_25/scripts/get_selected_meshes.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
from pxr import Usd, UsdGeom

# Collect the currently-selected prims that are meshes.
stage = omni.usd.get_context().get_stage()
selection = omni.usd.get_context().get_selection().get_selected_prim_paths()
meshes = [
    prim
    for prim in (stage.GetPrimAtPath(path) for path in selection)
    if prim.IsA(UsdGeom.Mesh)
]
print("Selected meshes:")
print(meshes) | 377 | Python | 22.624999 | 76 | 0.713528 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/toggle_hud.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.viewport.utility as okvu
okvu.toggle_global_visibility() | 112 | Python | 21.599996 | 40 | 0.794643 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/custom_attrs.py |
# SPDX-License-Identifier: Apache-2.0
# Docs: https://docs.omniverse.nvidia.com/prod_kit/prod_kit/programmer_ref/usd/properties/create-attribute.html
import omni.usd
from pxr import Usd, Sdf
stage = omni.usd.get_context().get_stage()
prim: Usd.Prim = stage.GetPrimAtPath("/World/Cylinder")
attr: Usd.Attribute = prim.CreateAttribute("mySecondAttr", Sdf.ValueTypeNames.Bool)
attr.Set(False)
| 394 | Python | 29.384613 | 111 | 0.769036 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/custom_global_data.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
stage = omni.usd.get_context().get_stage()
layer = stage.GetRootLayer()
print(type(layer))
layer.SetCustomLayerData({"Hello": "World"})
stage.DefinePrim("/World/Hello", "HelloWorld")
stage.DefinePrim("/World/MyTypeless") | 278 | Python | 22.249998 | 46 | 0.741007 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/make_unselectable.py | # SPDX-License-Identifier: Apache-2.0
# Docs: https://docs.omniverse.nvidia.com/kit/docs/omni.usd/latest/omni.usd/omni.usd.UsdContext.html#omni.usd.UsdContext.set_pickable
import omni.usd
ctx = omni.usd.get_context()
ctx.set_pickable("/", True) | 247 | Python | 29.999996 | 133 | 0.757085 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/change_viewport_camera.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.viewport.utility as vu
from pxr import Sdf
vp_api = vu.get_active_viewport()
vp_api.camera_path = "/World/Camera_01"
| 173 | Python | 20.749997 | 39 | 0.745665 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_09_01/scripts/logging.py | # SPDX-License-Identifier: Apache-2.0
import logging
import carb
logger = logging.getLogger()
print("Hello")
carb.log_info("World")
logger.info("Omniverse")
| 164 | Python | 9.999999 | 37 | 0.72561 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_09_01/scripts/xforming.py | # SPDX-License-Identifier: Apache-2.0
from pxr import UsdGeom
import omni.usd
stage = omni.usd.get_context().get_stage()
cube = stage.GetPrimAtPath("/World/Xform/Cube")
cube_xformable = UsdGeom.Xformable(cube)
transform = cube_xformable.GetLocalTransformation()
print(transform)
transform2 = cube_xformable.GetLocalTransformation()
print(transform2)
| 355 | Python | 21.249999 | 52 | 0.785915 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_28/scripts/usd_watcher.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
from pxr import Gf

# Demo: watch a prim and one of its attributes for changes via omni.usd's watcher.
stage = omni.usd.get_context().get_stage()
cube = stage.GetPrimAtPath("/World/Cube")

def print_size(changed_path):
    # Callback for changes to the cube's `size` attribute.
    print("Size Changed:", changed_path)

def print_pos(changed_path):
    """Print the local translation of the prim whose path changed."""
    print(changed_path)
    # The watcher can fire with either a prim path or a property path.
    if changed_path.IsPrimPath():
        prim_path = changed_path
    else:
        prim_path = changed_path.GetPrimPath()
    prim = stage.GetPrimAtPath(prim_path)
    # Index 3 of the SRT tuple is the translation component (as printed below).
    local_transform = omni.usd.get_local_transform_SRT(prim)
    print("Translation: ", local_transform[3])
def print_world_pos(changed_path):
    """Print the world-space translation of the prim at `changed_path`.

    BUGFIX: this previously read an undefined global `prim`, which raised
    NameError the first time the watcher fired. Resolve the prim from the
    changed path (which may be a property path) instead, mirroring print_pos.
    """
    prim_path = changed_path if changed_path.IsPrimPath() else changed_path.GetPrimPath()
    prim = stage.GetPrimAtPath(prim_path)
    world_transform: Gf.Matrix4d = omni.usd.get_world_transform_matrix(prim)
    translation: Gf.Vec3d = world_transform.ExtractTranslation()
    print(translation)
size_attr = cube.GetAttribute("size")
# Subscribe to change notifications. The returned subscription objects must be
# kept alive; rebinding them (e.g. to None) unsubscribes.
cube_sub = omni.usd.get_watcher().subscribe_to_change_info_path(cube.GetPath(), print_world_pos)
cube_size_sub = omni.usd.get_watcher().subscribe_to_change_info_path(size_attr.GetPath(), print_size)
cube_sub = None
cube_size_sub = None | 1,040 | Python | 31.531249 | 101 | 0.714423 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_28/scripts/extras.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
if prim:
print("Prim Exists")
from pxr import UsdGeom
# e.g., find all prims of type UsdGeom.Mesh
mesh_prims = [x for x in stage.Traverse() if x.IsA(UsdGeom.Mesh)]
mesh_prims = []
for x in stage.Traverse():
if x.IsA(UsdGeom.Mesh):
mesh_prims.append(x)
print(mesh_prims) | 432 | Python | 23.055554 | 65 | 0.685185 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_28/scripts/docking.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
my_window = ui.Window("Example Window", width=300, height=300)
with my_window.frame:
with ui.VStack():
f = ui.FloatField()
def clicked(f=f):
print("clicked")
f.model.set_value(f.model.get_value_as_float() + 1)
ui.Button("Plus One", clicked_fn=clicked)
my_window.dock_in_window("Property", ui.DockPosition.SAME)
ui.dock_window_in_window(my_window.title, "Property", ui.DockPosition.RIGHT, 0.2)
my_window.deferred_dock_in("Content", ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
| 548 | Python | 26.449999 | 81 | 0.717153 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/combobox_selected_item.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
my_window = ui.Window("Example Window", width=300, height=300)
combo_sub = None
options = ["One", "Two", "Three"]
with my_window.frame:
with ui.VStack():
combo_model: ui.AbstractItemModel = ui.ComboBox(0, *options).model
def combo_changed(item_model: ui.AbstractItemModel, item: ui.AbstractItem):
value_model = item_model.get_item_value_model(item)
current_index = value_model.as_int
option = options[current_index]
print(f"Selected '{option}' at index {current_index}.")
combo_sub = combo_model.subscribe_item_changed_fn(combo_changed)
def clicked():
value_model = combo_model.get_item_value_model()
current_index = value_model.as_int
option = options[current_index]
print(f"Button Clicked! Selected '{option}' at index {current_index}.")
ui.Button("Print Combo Selection", clicked_fn=clicked)
| 1,009 | Python | 35.071427 | 83 | 0.634291 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/use_tokens.py | # SPDX-License-Identifier: Apache-2.0
# https://docs.omniverse.nvidia.com/py/kit/docs/guide/tokens.html
import carb.tokens
from pathlib import Path
path = Path("${shared_documents}") / "maticodes.foo"
resolved_path = carb.tokens.get_tokens_interface().resolve(str(path))
print(resolved_path) | 293 | Python | 31.666663 | 69 | 0.761092 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/simple_instancer.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
from pxr import Usd, UsdGeom, Sdf, Gf
stage: Usd.Stage = omni.usd.get_context().get_stage()
prim_path = Sdf.Path("/World/MyInstancer")
instancer: UsdGeom.PointInstancer = UsdGeom.PointInstancer.Define(stage, prim_path)
proto_container = UsdGeom.Scope.Define(stage, prim_path.AppendPath("Prototypes"))
shapes = []
shapes.append(UsdGeom.Cube.Define(stage, proto_container.GetPath().AppendPath("Cube")))
shapes.append(UsdGeom.Sphere.Define(stage, proto_container.GetPath().AppendPath("Sphere")))
shapes.append(UsdGeom.Cone.Define(stage, proto_container.GetPath().AppendPath("Cone")))
instancer.CreatePositionsAttr([Gf.Vec3f(0, 0, 0), Gf.Vec3f(2, 0, 0), Gf.Vec3f(4, 0, 0)])
instancer.CreatePrototypesRel().SetTargets([shape.GetPath() for shape in shapes])
instancer.CreateProtoIndicesAttr([0, 1, 2]) | 852 | Python | 49.176468 | 91 | 0.761737 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/one_widget_in_container.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
# A single VStack is placed inside each container frame, and the individual
# widgets are then stacked inside that VStack.
my_window = ui.Window("Example Window", width=300, height=300)
with my_window.frame:
    with ui.VStack():
        with ui.CollapsableFrame():
            # The VStack is the CollapsableFrame's one child; it holds both fields.
            with ui.VStack():
                ui.FloatField()
                ui.FloatField()
        ui.Button("Button 1")
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_09/maticodes/doh_2022_09_09/extension.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.ext
import omni.kit.commands
import omni.ui as ui
import omni.usd
from pxr import Gf, Sdf
# Check out: USDColorModel
# C:\ext_projects\omni-dev-office-hours\app\kit\exts\omni.example.ui\omni\example\ui\scripts\colorwidget_doc.py
class MyWindow(ui.Window):
    """Window that two-way binds a ui.ColorWidget to a USD color attribute.

    The user enters an attribute path into a string field; once a valid
    color attribute exists at that path, edits made in the color widget are
    written to the attribute, and external changes to the attribute are
    pushed back into the widget via a USD change-info watcher.
    """

    def __init__(self, title: str = None, **kwargs):
        super().__init__(title, **kwargs)
        # Model behind the ui.ColorWidget; each child item is one component.
        self._color_model = None
        # Per-component value-changed subscriptions (kept alive here).
        self._color_changed_subs = []
        # Model behind the attribute-path string field.
        self._path_model = None
        # USD watcher subscription for the currently targeted attribute.
        self._change_info_path_subscription = None
        # Subscription for edits to the path string field.
        self._path_changed_sub = None
        self._stage = omni.usd.get_context().get_stage()
        # Build the UI lazily, when the window frame is first laid out.
        self.frame.set_build_fn(self._build_window)

    def _build_window(self):
        """Construct the path field and the color widget with its float fields."""
        with ui.ScrollingFrame():
            with ui.VStack(height=0):
                ui.Label("Property Path")
                self._path_model = ui.StringField().model
                self._path_changed_sub = self._path_model.subscribe_value_changed_fn(
                    self._on_path_changed
                )
                ui.Label("Color")
                with ui.HStack(spacing=5):
                    self._color_model = ui.ColorWidget(width=0, height=0).model
                    # One editable float field per color component, each
                    # sharing the component's value model with the widget.
                    for item in self._color_model.get_item_children():
                        component = self._color_model.get_item_value_model(item)
                        self._color_changed_subs.append(component.subscribe_value_changed_fn(self._on_color_changed))
                        ui.FloatField(component)

    def _on_mtl_attr_changed(self, path):
        """USD watcher callback: copy the attribute's color into the widget."""
        color_attr = self._stage.GetAttributeAtPath(path)
        color_model_items = self._color_model.get_item_children()
        if color_attr:
            color = color_attr.Get()
            # Write each authored component into the matching widget model.
            for i in range(len(color)):
                component = self._color_model.get_item_value_model(color_model_items[i])
                component.set_value(color[i])

    def _on_path_changed(self, model):
        """Path-field callback: start watching the attribute at the new path.

        Only subscribes when the string parses as a valid Sdf path and an
        attribute actually exists there.
        """
        # NOTE(review): a previous subscription is only replaced when the new
        # path resolves to an attribute; typing an invalid path leaves the
        # old watcher active — confirm whether that is intended.
        if Sdf.Path.IsValidPathString(model.as_string):
            attr_path = Sdf.Path(model.as_string)
            color_attr = self._stage.GetAttributeAtPath(attr_path)
            if color_attr:
                self._change_info_path_subscription = omni.usd.get_watcher().subscribe_to_change_info_path(
                    attr_path,
                    self._on_mtl_attr_changed
                )

    def _on_color_changed(self, model):
        """Color-widget callback: write the widget's components to the attribute."""
        values = []
        for item in self._color_model.get_item_children():
            component = self._color_model.get_item_value_model(item)
            values.append(component.as_float)
        if Sdf.Path.IsValidPathString(self._path_model.as_string):
            attr_path = Sdf.Path(self._path_model.as_string)
            color_attr = self._stage.GetAttributeAtPath(attr_path)
            if color_attr:
                # Only the first three components (RGB) are authored.
                color_attr.Set(Gf.Vec3f(*values[0:3]))

    def destroy(self) -> None:
        """Drop all subscriptions before tearing the window down."""
        self._change_info_path_subscription = None
        self._color_changed_subs = None
        self._path_changed_sub = None
        return super().destroy()
class MyExtension(omni.ext.IExt):
    """Extension entry point: owns the lifecycle of the example window."""

    def on_startup(self, ext_id):
        carb.log_info("[maticodes.doh_2022_09_09] Dev Office Hours Extension (2022-09-09) startup")
        self._window = MyWindow("MyWindow", width=300, height=300)

    def on_shutdown(self):
        carb.log_info("[maticodes.doh_2022_09_09] Dev Office Hours Extension (2022-09-09) shutdown")
        # Detach the window reference first, then destroy it if one existed.
        window, self._window = self._window, None
        if window:
            window.destroy()
| 3,584 | Python | 39.738636 | 117 | 0.588728 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_09/scripts/undo_group.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.kit.undo
# Two separate commands: each gets its own undo-history entry, so reverting
# both cubes requires pressing Ctrl+Z twice.
for _ in range(2):
    omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
        prim_type='Cube')

# Inside an undo group the same two commands collapse into a single
# undo-history entry, so one Ctrl+Z reverts both cubes.
with omni.kit.undo.group():
    for _ in range(2):
        omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
            prim_type='Cube')
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_09/scripts/skip_undo_history.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.kit.primitive.mesh as mesh_cmds
# Executing through omni.kit.commands records the command in the undo
# history, so it can be reverted with Ctrl+Z.
omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
	prim_type='Cube')

# Instantiating the command class and calling do() directly runs the same
# operation without going through the command system, so no undo-history
# entry is created.
mesh_cmds.CreateMeshPrimWithDefaultXformCommand("Cube").do()
| 293 | Python | 23.499998 | 60 | 0.798635 |
Subsets and Splits