file_path
stringlengths 21
202
| content
stringlengths 12
1.02M
| size
int64 12
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 3.33
100
| max_line_length
int64 10
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/omni/example/custom_ui/style.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["scatter_window_style"]
from omni.ui import color as cl
from omni.ui import constant as fl
from omni.ui import url
import omni.kit.app
import omni.ui as ui
import pathlib
# Absolute path of this extension's root folder, resolved through the
# extension manager (useful for locating bundled resources such as icons).
EXTENSION_FOLDER_PATH = pathlib.Path(
    omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)

# Pre-defined constants. It's possible to change them runtime.
# Registering them on `cl`/`fl` makes them addressable by name from any
# style dict and overridable at runtime without rebuilding the UI.
cl.scatter_window_hovered = cl("#2b2e2e")  # secondary color of hovered group frames
cl.scatter_window_text = cl("#9e9e9e")  # color of attribute-name labels
fl.scatter_window_attr_hspacing = 10  # horizontal margin around attribute labels
fl.scatter_window_attr_spacing = 1  # vertical margin around attribute labels
fl.scatter_window_group_spacing = 2  # vertical margin between collapsable groups

# The main style dict. Selectors are "WidgetType::name[:state]"; widgets in
# window.py opt in via their `name=` argument (e.g. name="attribute_name").
scatter_window_style = {
    "Label::attribute_name": {
        "color": cl.scatter_window_text,
        "margin_height": fl.scatter_window_attr_spacing,
        "margin_width": fl.scatter_window_attr_hspacing,
    },
    "CollapsableFrame::group": {"margin_height": fl.scatter_window_group_spacing},
    "CollapsableFrame::group:hovered": {"secondary_color": cl.scatter_window_hovered},
}
| 1,408 | Python | 35.128204 | 89 | 0.740767 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/omni/example/custom_ui/commands.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ScatterCreatePointInstancerCommand"]
from pxr import Gf
from pxr import Sdf
from pxr import Usd
from pxr import UsdGeom
from typing import List
from typing import Optional
from typing import Tuple
import omni.kit.commands
import omni.usd.commands
class ScatterCreatePointInstancerCommand(omni.kit.commands.Command, omni.usd.commands.stage_helper.UsdStageHelper):
    """
    Create PointInstancer undoable **Command**.

    ### Arguments:

        `path_to: str`
            The path for the new prims

        `transforms: List`
            Pairs containing transform matrices and ids to apply to new objects

        `prim_names: List[str]`
            Prims to duplicate
    """

    def __init__(
        self,
        path_to: str,
        transforms: List[Tuple[Gf.Matrix4d, int]],
        prim_names: List[str],
        stage: Optional[Usd.Stage] = None,
        context_name: Optional[str] = None,
    ):
        omni.usd.commands.stage_helper.UsdStageHelper.__init__(self, stage, context_name)
        self._path_to = path_to
        # The input arrives as [(tr, id), (tr, id), ...]. Transpose it into
        # ([tr, tr, ...], [id, id, ...]) so positions and prototype indices
        # can be written as two flat attribute arrays. Materialize first:
        # `transforms` may be a generator, and an empty input previously
        # crashed with IndexError on `unzipped[0]`.
        pairs = list(transforms)
        if pairs:
            matrices, proto_indices = zip(*pairs)
        else:
            matrices, proto_indices = (), ()
        self._positions = [m.ExtractTranslation() for m in matrices]
        self._proto_indices = proto_indices
        self._prim_names = prim_names.copy()

    def do(self):
        """Create the PointInstancer prim and fill in its attributes."""
        stage = self._get_stage()
        # Set up PointInstancer: one prototype target per source prim, plus
        # flat arrays of positions and prototype indices.
        instancer = UsdGeom.PointInstancer.Define(stage, Sdf.Path(self._path_to))
        attr = instancer.CreatePrototypesRel()
        for name in self._prim_names:
            attr.AddTarget(Sdf.Path(name))
        instancer.CreatePositionsAttr().Set(self._positions)
        instancer.CreateProtoIndicesAttr().Set(self._proto_indices)

    def undo(self):
        """Undo by deleting the prim that `do()` created."""
        delete_cmd = omni.usd.commands.DeletePrimsCommand([self._path_to])
        delete_cmd.do()
| 2,346 | Python | 32.056338 | 115 | 0.663257 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/omni/example/custom_ui/scatter.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["scatter"]
from typing import List, Optional
import random
from pxr import Gf
def scatter(
    count: List[int], distance: List[float], randomization: List[float], id_count: int = 1, seed: Optional[int] = None
):
    """
    Yield (transform matrix, id) pairs that arrange multiple objects on a
    3D grid centered at the origin, with optional random jitter per axis.

    ### Arguments:

        `count: List[int]`
            Number of matrices to generate per axis

        `distance: List[float]`
            The distance between objects per axis

        `randomization: List[float]`
            Random distance per axis

        `id_count: int`
            Count of different ids

        `seed: int`
            If seed is omitted or None, the current system time is used. If seed
            is an int, it is used directly.
    """
    # Seed the RNG so the same seed reproduces the same layout.
    random.seed(seed)
    # Offsets that center the grid around the origin on each axis.
    center = [0.5 * (axis_count - 1) for axis_count in count]
    for ix in range(count[0]):
        base_x = (ix - center[0]) * distance[0]
        for iy in range(count[1]):
            base_y = (iy - center[1]) * distance[1]
            for iz in range(count[2]):
                base_z = (iz - center[2]) * distance[2]
                # Build an identity matrix, then translate it to the grid
                # position jittered by up to the per-axis randomization.
                matrix = Gf.Matrix4d(1)
                matrix.SetTranslate(
                    Gf.Vec3d(
                        base_x + random.random() * randomization[0],
                        base_y + random.random() * randomization[1],
                        base_z + random.random() * randomization[2],
                    )
                )
                # Pick a prototype id in [0, id_count).
                proto_id = int(random.random() * id_count)
                yield (matrix, proto_id)
| 2,076 | Python | 30 | 118 | 0.577553 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/omni/example/custom_ui/__init__.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ScatterCreatePointInstancerCommand", "ScatterWindowExtension", "ScatterWindow"]
from .commands import ScatterCreatePointInstancerCommand
from .extension import ScatterWindowExtension
from .window import ScatterWindow
| 663 | Python | 46.428568 | 91 | 0.820513 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/omni/example/custom_ui/combo_box_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ComboBoxModel"]
from typing import Optional
import omni.ui as ui
class ListItem(ui.AbstractItem):
    """A single text entry of the combo box model."""

    def __init__(self, text):
        super().__init__()
        # The widget reads and edits the label through this string model.
        self.name_model = ui.SimpleStringModel(text)

    def __repr__(self):
        return '"' + self.name_model.as_string + '"'

    @property
    def as_string(self):
        """The plain string currently held by the name model."""
        return self.name_model.as_string
class ComboBoxModel(ui.AbstractItemModel):
    """
    Model for a flat string list shown in a ComboBox. Initialize it from
    any list of strings:

        string_list = ["Hello", "World"]
        model = ComboBoxModel(*string_list)
        ui.ComboBox(model)
    """

    def __init__(self, *args, default=0):
        super().__init__()
        self._children = [ListItem(text) for text in args]
        self._default = ui.SimpleIntModel()
        self._default.as_int = default
        # Refresh the combo box whenever the selected index changes.
        self._default.add_value_changed_fn(lambda _: self._item_changed(None))

    def get_item_children(self, item):
        """Return the children the widget asks for (flat list: root only)."""
        # Only the root (item is None) has children in this flat model.
        return self._children if item is None else []

    def get_item_value_model_count(self, item):
        """The number of columns."""
        return 1

    def get_item_value_model(self, item: Optional[ListItem], column_id):
        """
        Return the value model that tracks the given item's value.

        The root (item is None) is tracked by the selected-index int model;
        every list entry is tracked by its own string model.
        """
        return self._default if item is None else item.name_model

    def get_current_item(self) -> ListItem:
        """Return the currently selected item in the ComboBox."""
        selected_index = self._default.as_int
        return self._children[selected_index]
| 2,391 | Python | 30.893333 | 82 | 0.637808 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/omni/example/custom_ui/utils.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["get_selection", "duplicate_prims"]
from typing import List
import omni.usd
import omni.kit.commands
from pxr import Sdf
from pxr import Gf
def get_selection() -> List[str]:
    """Return the stage paths of the prims currently selected in the viewport/stage."""
    return omni.usd.get_context().get_selection().get_selected_prim_paths()
def duplicate_prims(
    transforms: List = None,
    prim_names: List[str] = None,
    target_path: str = "",
    mode: str = "Copy",
    scale: List[float] = None,
):
    """
    Duplicate the given prims under `target_path`, one copy per transform.

    ### Arguments:

        `transforms: List`
            Pairs containing transform matrices and ids to apply to new objects

        `prim_names: List[str]`
            Prims to duplicate

        `target_path: str`
            The parent for the new prims

        `mode: str`
            "Reference": Create a reference of the given prim path
            "Copy": Create a copy of the given prim path
            "PointInstancer": Create a PointInstancer

        `scale: List[float]`
            Scale applied to every duplicated prim (defaults to [1, 1, 1])
    """
    # Normalize omitted arguments here rather than using mutable default
    # arguments, which would be shared between calls.
    transforms = [] if transforms is None else transforms
    prim_names = [] if prim_names is None else prim_names
    scale = [1, 1, 1] if scale is None else scale
    if mode == "PointInstancer":
        # A single undoable command creates the whole instancer.
        omni.kit.commands.execute(
            "ScatterCreatePointInstancer",
            path_to=target_path,
            transforms=transforms,
            prim_names=prim_names,
        )
        return
    usd_context = omni.usd.get_context()
    # Call commands in a single undo group. So the user will undo everything
    # with a single press of ctrl-z
    with omni.kit.undo.group():
        # Create a group to parent all the duplicates
        omni.kit.commands.execute("CreatePrim", prim_path=target_path, prim_type="Scope")
        for i, (matrix, proto_id) in enumerate(transforms):
            path_from = Sdf.Path(prim_names[proto_id])
            path_to = Sdf.Path(target_path).AppendChild(f"{path_from.name}{i}")
            # Create a new prim
            if mode == "Copy":
                omni.kit.commands.execute("CopyPrims", paths_from=[path_from.pathString], paths_to=[path_to.pathString])
            elif mode == "Reference":
                omni.kit.commands.execute(
                    "CreateReference", usd_context=usd_context, prim_path=path_from, path_to=path_to, asset_path=""
                )
            else:
                # Unknown mode: skip this transform entirely.
                continue
            # Row 3 of a Gf.Matrix4d holds the translation component.
            trans_matrix = matrix[3]
            new_transform = Gf.Vec3d(trans_matrix[0], trans_matrix[1], trans_matrix[2])
            omni.kit.commands.execute("TransformPrimSRT", path=path_to, new_translation=new_transform, new_scale=scale)
| 3,044 | Python | 34.823529 | 144 | 0.625493 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/omni/example/custom_ui/window.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ScatterWindow"]
import omni.ui as ui
from .style import scatter_window_style
from .utils import get_selection
from .combo_box_model import ComboBoxModel
from .scatter import scatter
from .utils import duplicate_prims
LABEL_WIDTH = 120
SPACING = 4
class ScatterWindow(ui.Window):
    """The class that represents the window"""

    def __init__(self, title: str, delegate=None, **kwargs):
        # Width (in pixels) reserved for attribute-name labels; exposed via
        # the `label_width` property so callers can adjust it.
        self.__label_width = LABEL_WIDTH
        super().__init__(title, **kwargs)
        # Models: each widget below is backed by one of these value models,
        # so the UI state can be read/written without touching widgets.
        self._source_prim_model_a = ui.SimpleStringModel()
        self._scatter_prim_model_a = ui.SimpleStringModel()
        ## Step 6.4: Add Prim Model B Here ##
        self._scatter_type_model = ComboBoxModel("Reference", "Copy", "PointInstancer")
        self._scatter_seed_model = ui.SimpleIntModel()
        # Per-axis [X, Y, Z] models for count, spacing, and random jitter.
        self._scatter_count_models = [ui.SimpleIntModel(), ui.SimpleIntModel(), ui.SimpleIntModel()]
        self._scatter_distance_models = [ui.SimpleFloatModel(), ui.SimpleFloatModel(), ui.SimpleFloatModel()]
        self._scatter_random_models = [ui.SimpleFloatModel(), ui.SimpleFloatModel(), ui.SimpleFloatModel()]
        self._scale_models = [ui.SimpleFloatModel(), ui.SimpleFloatModel(), ui.SimpleFloatModel()]
        # Defaults
        self._scatter_prim_model_a.as_string = "/World/Scatter01"
        ## Step 6.6: Add Prim Model B Here ##
        self._scatter_count_models[0].as_int = 50
        self._scatter_count_models[1].as_int = 1
        self._scatter_count_models[2].as_int = 1
        self._scatter_distance_models[0].as_float = 10
        self._scatter_distance_models[1].as_float = 10
        self._scatter_distance_models[2].as_float = 10
        self._scale_models[0].as_float = 1
        self._scale_models[1].as_float = 1
        self._scale_models[2].as_float = 1
        # Apply the style to all the widgets of this window
        self.frame.style = scatter_window_style
        # Set the function that is called to build widgets when the window is
        # visible
        self.frame.set_build_fn(self._build_fn)

    def destroy(self):
        # It will destroy all the children
        super().destroy()

    @property
    def label_width(self):
        """The width of the attribute label"""
        return self.__label_width

    @label_width.setter
    def label_width(self, value):
        """The width of the attribute label"""
        self.__label_width = value
        # Rebuild so every label picks up the new width.
        self.frame.rebuild()

    def _on_get_selection(self, model):
        """Called when the user presses the "Get From Selection" button"""
        # Store the current stage selection as a comma-separated path list.
        model.as_string = ", ".join(get_selection())

    def _on_scatter(self, source_model, scatter_model):
        """Called when the user presses the "Scatter Prim" button"""
        # Source prims come from the text field; fall back to the current
        # stage selection, and bail out if there is nothing to scatter.
        prim_names = [i.strip() for i in source_model.as_string.split(",")]
        if not prim_names:
            prim_names = get_selection()
        if not prim_names:
            return
        # Generator of (matrix, id) pairs driven by the per-axis models.
        transforms = scatter(
            count=[m.as_int for m in self._scatter_count_models],
            distance=[m.as_float for m in self._scatter_distance_models],
            randomization=[m.as_float for m in self._scatter_random_models],
            id_count=len(prim_names),
            seed=self._scatter_seed_model.as_int,
        )
        duplicate_prims(
            transforms=transforms,
            prim_names=prim_names,
            target_path=scatter_model.as_string,
            mode=self._scatter_type_model.get_current_item().as_string,
            scale=[self._scale_models[0].as_float, self._scale_models[1].as_float, self._scale_models[2].as_float]
        )

    def _build_source(self):
        """Build the widgets of the "Source" group"""
        with ui.CollapsableFrame("Source", name="group"):
            with ui.VStack(height=0, spacing=SPACING):
                with ui.HStack():
                    ui.Label("Prim A", name="attribute_name", width=self.label_width)
                    ui.StringField(model=self._source_prim_model_a)
                    # Button that puts the selection to the string field
                    ui.Button(
                        " S ",
                        width=0,
                        height=0,
                        style={"margin": 0},
                        clicked_fn=lambda:self._on_get_selection(self._source_prim_model_a),
                        tooltip="Get From Selection",
                    )
                ## Step 6.8: Add New HStack Below ##

    def _build_scatter(self):
        """Build the widgets of the "Scatter" group"""
        with ui.CollapsableFrame("Scatter", name="group"):
            with ui.VStack(height=0, spacing=SPACING):
                with ui.HStack():
                    ui.Label("Prim A Path", name="attribute_name", width=self.label_width)
                    ui.StringField(model=self._scatter_prim_model_a)
                ## Step 6.10: Add new ui.HStack Below ##
                with ui.HStack():
                    ui.Label("Prim Type", name="attribute_name", width=self.label_width)
                    ui.ComboBox(self._scatter_type_model)
                with ui.HStack():
                    ui.Label("Seed", name="attribute_name", width=self.label_width)
                    ui.IntDrag(model=self._scatter_seed_model, min=0, max=10000)

    def _build_axis(self, axis_id, axis_name):
        """Build the widgets of the "X" or "Y" or "Z" group"""
        # axis_id indexes the per-axis model lists (0=X, 1=Y, 2=Z).
        with ui.CollapsableFrame(axis_name, name="group"):
            with ui.VStack(height=0, spacing=SPACING):
                with ui.HStack():
                    ui.Label("Object Count", name="attribute_name", width=self.label_width)
                    ui.IntDrag(model=self._scatter_count_models[axis_id], min=1, max=100)
                with ui.HStack():
                    ui.Label("Distance", name="attribute_name", width=self.label_width)
                    ui.FloatDrag(self._scatter_distance_models[axis_id], min=0, max=10000)
                with ui.HStack():
                    ui.Label("Random", name="attribute_name", width=self.label_width)
                    ui.FloatDrag(self._scatter_random_models[axis_id], min=0, max=10000)

    def _build_fn(self):
        """
        The method that is called to build all the UI once the window is
        visible.
        """
        with ui.ScrollingFrame():
            with ui.VStack(height=0):
                self._build_source()
                self._build_scatter()
                self._build_axis(0, "X Axis")
                self._build_axis(1, "Y Axis")
                self._build_axis(2, "Z Axis")
                # The Go button
                ui.Button("Scatter Prim A", clicked_fn=lambda:self._on_scatter(self._source_prim_model_a, self._scatter_prim_model_a))
                ## Step 6.12: Add Go Button Below ##
| 7,416 | Python | 43.148809 | 134 | 0.572816 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/docs/CHANGELOG.md | # Changelog
## [1.0.0] - 2022-06-27
### Added
- Initial window
| 64 | Markdown | 9.833332 | 23 | 0.59375 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.custom_ui/docs/README.md | # Scatter Tool (omni.example.ui_scatter_tool)
![](https://github.com/NVIDIA-Omniverse/kit-extension-sample-scatter/raw/main/exts/omni.example.ui_scatter_tool/data/preview.png)
## Overview
This Extension creates a new UI and function to `Scatter` a selected primitive along the X, Y, and Z Axis. The user can set parameters for Object Count, Distance, and Randomization.
## [Tutorial](../Tutorial/Scatter_Tool_Guide.md)
This extension sample also includes a step-by-step tutorial to accelerate your growth as you learn to build your own Omniverse Kit extensions.
In the tutorial you will learn how to build off existing modules using `Omniverse Ui Framework` and create `Scatter Properties`. Additionally, the tutorial has a `Final Scripts` folder to use as a reference as you go along.
[Get started with the tutorial.](../Tutorial/Scatter_Tool_Guide.md)
## Usage
Once the extension is enabled in the `Extension Manager` the `Scatter Window` will appear. You may dock this window or keep it floating in the console. Select your primitive in the hierarchy that you want to scatter and then click the `S` button next to the `Source > Prim` pathway to set the selected primitive. Then, set your `Scatter Properties` and click the `Scatter` button. | 1,259 | Markdown | 65.315786 | 380 | 0.77363 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.scene_auth_scatter/docs/README.md | # Scatter Tool (omni.example.ui_scatter_tool)
![](https://github.com/NVIDIA-Omniverse/kit-extension-sample-scatter/raw/main/exts/omni.example.ui_scatter_tool/data/preview.png)
## Overview
This Extension creates a new UI and function to `Scatter` a selected primitive along the X, Y, and Z Axis. The user can set parameters for Object Count, Distance, and Randomization.
## Usage
Once the extension is enabled in the `Extension Manager` the `Scatter Window` will appear. You may dock this window or keep it floating in the console. Select your primitive in the hierarchy that you want to scatter and then click the `S` button next to the `Source > Prim` pathway to set the selected primitive. Then, set your `Scatter Properties` and click the `Scatter` button. | 767 | Markdown | 68.818176 | 380 | 0.773142 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.scene_auth_scatter/workshop/Siggraph2022_Scatter_Workshop_1.md | ![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/logo.png?raw=true)
# NVIDIA OMNIVERSE
# Easily Develop Advanced 3D Layout Tools on NVIDIA Omniverse
See how to easily create your own custom scene layout tool with the modular Omniverse platform with a few lines of Python script. In this workshop, you'll build your own custom scene layout in Omniverse using Python.
# Learning Objectives
- How to Enable an extension
- Utilize Command Executions
- Create a feature to Scatter from a Prim's origin
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1/3DLayoutToolsIntro.mp4" type="video/mp4">
</video>
# Section I
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1/3DLayoutToolsSection1.mp4" type="video/mp4">
</video>
# Open Stage and Get Extension from Community / Third Party
## Step 1: Open the Workshop Stage
### <b>Step 1.1: Download the Stage from the Link Provided</b>
[Stage Link](https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1/Stage.zip)
### <b> Step 1.2: Unzip Stage Using Extract All...
This creates an unzipped file folder called `Stage`.
### <b> Step 1.3: Open Stage in Omniverse
Navigate inside Omniverse Code's `Content tab` to the stage file's location on your system.
(i.e. C:/Users/yourName/Downloads/Stage)
**Double Click** `Stage.usd` in the center window pane of the `Content tab` at the bottom of the Omniverse Code Console and it will appear in the viewport.
## Step 2: Adding the Extension
We will be getting an extension from the *Community / Third Party Section* of the *Extension Manager*. There are also other extensions developed by NVIDIA in the *NVIDIA Section*.
### Step 2.1: Open Extension Manager
**Click** on the *Extension Tab*.
### Step 2.2: Filter by Community / Third Party Extensions
**Select** *Community / Third Party* tab.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/filtercommunity.png?raw=true)
*Community / Third Party* section is where you can find other developer's extensions from the Community.
### Step 2.3: Search for Scatter
**Search** for "scatter" and **Click** on the extension with the subtitle *omni.example.scene_auth_scatter*.
> **Note:** There are two different scatter tools. Please double check that the one installed has the subtitle: *omni.example.scene_auth_scatter*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/communitysearch.png?raw=true)
### Step 2.4: Install/Enable the Extension
**Click** on the *Install button* to download the extension. If the extension is already downloaded **Click** on the toggle next to *Disable*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/installext.png?raw=true)
## Step 3: Using the Extension
With the extension enabled, try the following steps.
### Step 3.1: Select a Prim
Select a Prim in the *Stage*
> **Note:** Prim is short for “primitive”, the prim is the fundamental unit in Omniverse. Anything imported or created in a USD scene is a prim. This includes, cameras, sounds, lights, meshes, and more. Primitives are technically containers of metadata, properties, and other prims. Read more about USD prims in the official documentation.
We recommend using any of these Prims:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/primstoselect.png?raw=true)
#### Step 3.2: Set Prim Path to Scatter Window
With the selected Prim, **click** the *S button* in the *Scatter Window*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
#### Step 3.3: Scatter Selected Prim
At the bottom of the *Scatter Window*, **click** the *Scatter button*
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
### Step 3.4: Undo Scatter
Find the `Scatter01` folder in `Stage` and left-click on the folder then right-click to delete or hit the `delete` button on your keyboard.
`Stage` is the panel that allows you to see all the assets in your current `USD`, or Universal Scene Description. It lists the prims in hierarchical order.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_3/exts/omni.example.ui_scatter_tool/Workshop/images/deletescatter.gif?raw=true)
## Challenge Step 4: What else can you do with the Scatter Extension
These are *optional* challenges.
### Challenge 4.1: Use Cases
Come up with 5 use cases on how you would expand this extension.
### Challenge 4.2: Scatter Multiple Prims at Once
Try to scatter more than one marble at once.
<details>
<summary>Hint</summary>
#### Challenge Step 4.2.1: Scatter Multiple Prims at Once
In the *Stage*, **hold** *Ctrl key* and **select** multiple Prims.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/multiprim.png?raw=true)
#### Challenge Step 4.2.2: Scatter Multiple Prims at Once
**Repeat** steps `3.2` and `2.3`.
</details>
# Section II
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1/3DLayoutToolsSection2Intro.mp4" type="video/mp4">
</video>
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1/3DLayoutToolsWorkshop2.mp4" type="video/mp4">
</video>
# Scatter Relative to Source Prim
## Step 5: Change the Scatter functionality to Handle any Given Origin
To use any origin we will be modifying the scatter functionality to receive a position. The current scatter tool scatters at the world origin (0,0,0). This is inconvenient when there are prims far away from the origin.
### Step 5.1: Open the Extension in Visual Studio Code
From the *Scatter Extension*, **Click** the Visual Studio Icon.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/open_vs.png?raw=true)
A new instance of *Visual Studio Code* will open up.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/vs_code.png?raw=true)
### Step 5.2: Open `scatter.py`
**Locate** and **Open** `scatter.py` from `exts/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > scatter.py`
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterpy.png?raw=true)
### Step 5.3: Add New Origin Parameter to `scatter()`
**Add** `source_prim_location: List[float] = (0,0,0)` as a parameter for `scatter()`
``` python
def scatter(
count: List[int],
distance: List[float],
randomization: List[float],
id_count: int = 1,
seed: Optional[int] = None,
source_prim_location: List[float] = (0,0,0)
):
```
`source_prim_location` will contain x, y, and z coordinates of the prim we selected to scatter.
### Step 5.4: Locate `result.SetTranslate`
**Locate** near the bottom of `scatter.py` the code snippet below.
``` python
result.SetTranslate(
Gf.Vec3d(
x,
y,
z,
)
)
```
`Vec3d` creates a 3 dimensional vector. Each prim's position is generated via the code above.
### Step 5.5: Calculate the New Origin
During `Vec3d` creation, **add** each coordinate value stored in `source_prim_location` to the generated coordinate. i.e. `x` would turn into `source_prim_location[0] + x`.
``` python
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + x,
source_prim_location[1] + y,
source_prim_location[2] + z,
)
)
```
`scatter()` should look as follows:
``` python
def scatter(
count: List[int],
distance: List[float],
randomization: List[float],
id_count: int = 1,
seed: Optional[int] = None,
source_prim_location: List[float] = (0,0,0)
):
"""
Returns generator with pairs containing transform matrices and ids to
arrange multiple objects.
### Arguments:
`count: List[int]`
Number of matrices to generage per axis
`distance: List[float]`
The distance between objects per axis
`randomization: List[float]`
Random distance per axis
`id_count: int`
Count of differrent id
`seed: int`
If seed is omitted or None, the current system time is used. If seed
is an int, it is used directly.
"""
# Initialize the random number generator.
random.seed(seed)
for i in range(count[0]):
x = (i - 0.5 * (count[0] - 1)) * distance[0]
for j in range(count[1]):
y = (j - 0.5 * (count[1] - 1)) * distance[1]
for k in range(count[2]):
z = (k - 0.5 * (count[2] - 1)) * distance[2]
# Create a matrix
result = Gf.Matrix4d(1)
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + x,
source_prim_location[1] + y,
source_prim_location[2] + z,
)
)
id = int(random.random() * id_count)
yield (result, id)
```
### Step 5.6: Save `scatter.py` and Open `window.py`
**Save** `scatter.py` and **Open** `window.py` from `ext/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > window.py`.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/windowpy.png?raw=true)
### Step 5.7: Add Gf module
Underneath the imports and above `LABEL_WIDTH`, **add** the line `from pxr import Gf`
``` python
import omni.ui as ui
from .style import scatter_window_style
from .utils import get_selection
from .combo_box_model import ComboBoxModel
from .scatter import scatter
from .utils import duplicate_prims
from pxr import Gf
LABEL_WIDTH = 120
SPACING = 4
```
### Step 5.8: Locate `transforms`
**Locate** where `transforms` is declared inside of `_on_scatter()`
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int
)
```
### Step 5.9 Hard Code Origin Position
**Add** `source_prim_location=Gf.Vec3d((0,0,-500))`after `seed=self._scatter_seed_model.as_int`.
>**Note:** Don't forget to add a comma after `seed=self._scatter_seed_model.as_int`.
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
source_prim_location=Gf.Vec3d((0,0,-500))
)
```
`Gf.Vec3d((0,0,-500))` creates a 3 coordinate vector where x = 0, y = 0 and z = -500. Since Y represents up and down in the scene, X and Z are like a floor. By setting Z to -500 we are setting the scatter location -500 units along the Z axis.
### Step 5.10: Select a Marble in the *Stage*
**Save** `window.py` and go back to *Omniverse Code*. Go to *Stage* and **expand** Marbles, then **select** any marble.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/marbleselect.png?raw=true)
### Step 5.11: Set the Selected Marble's Path to the Scatter Extension
With a marble selected, **click** on the *S button* in the *Scatter Window*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
### Step 5.12: Scatter the Marbles
**Scroll** to the bottom of the extension and **click** the *Scatter button*.
> **Note**: If you do not see the *Scatter button* **scroll down** in the *extension window* or **expand** the *extension window* using the right corner.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
Notice how the marbles scattered to the right of the stage. This is -500 units on the Z axis. Try and change some of the values in the Y and/or X axis as well to see where the marbles will scatter next.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/sidescatter.png?raw=true)
## Step 6: Get the Location of the Source Prim
We will be changing the origin where the Prims get scattered. Firstly, we will be grabbing the location of the source prim.
### Step 6.1: Open `window.py`
**Open** `window.py` from `ext/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > window.py`
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/windowpy.png?raw=true)
### Step 6.2: Add `omni.usd` module
Under `import omni.ui as ui`, **add** the line `import omni.usd`
``` python
# Import omni.usd module
import omni.usd
```
You should then have the following at the top of your file:
``` python
import omni.ui as ui
import omni.usd
from .style import scatter_window_style
from .utils import get_selection
from .combo_box_model import ComboBoxModel
from .scatter import scatter
from .utils import duplicate_prims
from pxr import Gf
```
The `omni.usd` module is one of the core Kit APIs, and provides access to USD (Universal Scene Description) and USD-related application services.
### Step 6.3: Locate `_on_scatter()`
**Scroll Down** to find `_on_scatter()`, and **add** a new line before the variable declaration of `transforms`.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/newline.png?raw=true)
`_on_scatter()` is called when the user presses the *Scatter* button in the extension window.
### Step 6.4: Get USD Context
On the new line, **declare** `usd_context`. Make sure the line is tabbed in and parallel with the line `if not prim_names:`.
``` python
usd_context = omni.usd.get_context()
```
Your code should look like the following:
``` python
def _on_scatter(self):
"""Called when the user presses the "Scatter" button"""
prim_names = [i.strip() for i in self._source_prim_model.as_string.split(",")]
if not prim_names:
prim_names = get_selection()
if not prim_names:
# TODO: "Can't clone" message
pass
# Get the UsdContext we are attached to
usd_context = omni.usd.get_context()
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
)
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string,
)
```
### Step 6.5: Get the Stage
Below `usd_context` declaration, **add** `stage = usd_context.get_stage()`
``` python
# Store the UsdContext we are attached to
usd_context = omni.usd.get_context()
# Get the stage from the current UsdContext
stage = usd_context.get_stage()
```
The stage variable will use USD to get the current stage. The `Stage` is where your prims are nested in the hierarchy.
### Step 6.6: Get Source Prim from Stage
On the next line, **add** `prim = stage.GetPrimAtPath(self._source_prim_model.as_string)`
``` python
# Store the UsdContext we are attached to
usd_context = omni.usd.get_context()
# Get the stage from the current UsdContext
stage = usd_context.get_stage()
# Store the Prim that is currently referenced in the extension
prim = stage.GetPrimAtPath(self._source_prim_model.as_string)
```
### Step 6.7: Get Source Prim's Translation
Next we will **store** the prim's positional data by adding, `position = prim.GetAttribute('xformOp:translate').Get()`. After checking your work below **save** `window.py`.
``` python
# Store the UsdContext we are attached to
usd_context = omni.usd.get_context()
# Get the stage from the current UsdContext
stage = usd_context.get_stage()
# Store the Prim that is currently referenced in the extension
prim = stage.GetPrimAtPath(self._source_prim_model.as_string)
# Get the focused Prim's positional data
position = prim.GetAttribute('xformOp:translate').Get()
```
In order to get the location of the prim, we needed the translate value which is stored in the Xform. This gives us an X, Y, and Z of the selected prim.
> **Note** The Transform (Xform) is the fundamental element of all objects in Omniverse, the Location.
Check your work, it should look like this:
``` python
def _on_scatter(self):
"""Called when the user presses the "Scatter" button"""
prim_names = [i.strip() for i in self._source_prim_model.as_string.split(",")]
if not prim_names:
prim_names = get_selection()
if not prim_names:
# TODO: "Can't clone" message
pass
# Store the UsdContext we are attached to
usd_context = omni.usd.get_context()
# Get the stage from the current UsdContext
stage = usd_context.get_stage()
# Store the Prim that is currently referenced in the extension
prim = stage.GetPrimAtPath(self._source_prim_model.as_string)
# Get the focused Prim's positional data
position = prim.GetAttribute('xformOp:translate').Get()
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
            source_prim_location=Gf.Vec3d((0,0,-500))
)
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string,
)
```
## Step 7: Use the Selected Prim's Location as the Scatter Origin
After updating the scatter functionality we can pass the location of the source prim that we calculated from before.
### Step 7.1: Open window.py
**Open** `window.py` and locate where `transforms` is declared in `_on_scatter()`
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int
)
```
### Step 7.2: Pass the Location to `scatter()`
After `seed=self._scatter_seed_model.as_int`, **replace** the line `source_prim_location=Gf.Vec3d((0,0,-500))` with `source_prim_location=position`
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
source_prim_location=position
)
```
### Step 7.3: Test it out
Save `window.py` and head back into Omniverse. **Scatter** a prim in the stage. Then **scatter** a different prim. Notice how they will only scatter to the corresponding prim's location.
### Step 7.4: Hit Play
Play out the scene!
## Challenge Step 8.1: Add Randomization to Scatter
Currently, when the *Scatter button* is pressed it will scatter uniformly. Try to change up the code to allow for random distribution. Expand the *Hint* section if you get stuck.
> **Hint** Use `random.random()`
<details>
<summary>Hint</summary>
### Challenge Step 8.1.1: Open `scatter.py`
**Open** `scatter.py` and *locate* `scatter()`.
### Challenge Step 8.1.2: Add Random Value
**Locate** where we generate our Vec3d / `result.SetTranslate()`. **Modify** the first passed parameter as `source_prim_location[0] + (x + random.random() * randomization[0]),`
``` python
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + (x + random.random() * randomization[0]),
source_prim_location[1] + y,
source_prim_location[2] + z,
)
)
```
`randomization[0]` refers to the element in the UI of the *Scatter Extension Window* labeled *Random*.
### Challenge Step 8.1.3: Apply to Y and Z Values
**Modify** the Y and Z values that get passed into *Vec3d* constructor similar to the previous step.
``` python
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + (x + random.random() * randomization[0]),
source_prim_location[1] + (y + random.random() * randomization[1]),
source_prim_location[2] + (z + random.random() * randomization[2]),
)
)
```
### Challenge Step 8.1.4: Change Random Value
**Save** `scatter.py` and **go back** to Omniverse. **Modify** the *Random* parameter in the *Scatter Window*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/random.png?raw=true)
### Challenge Step 8.1.5: Scatter Prims
**Click** the *Scatter button* and see how the Prims scatter.
> **Note:** Make your Random values high if you are scattering in a small area.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/randomscatter.png?raw=true)
</details>
<br>
# Section III
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1/3DLayoutToolsWorkshop3.mp4" type="video/mp4">
</video>
# Scatter the Objects
## Step 9: Scatter a Marble
The stage has a few marbles we can use to scatter around.
### Step 9.1: Select a Marble in the *Stage*
Go to *Stage* and **expand** Marbles, then **select** any marble.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/marbleselect.png?raw=true)
### Step 9.2: Copy the Selected Marble's Path to the Scatter Extension
With a marble selected, **click** on the *S button* in the *Scatter Window*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
### Step 9.3: Change Distance Value for X Axis
**Change** the *Distance* in the *X Axis* to 10.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/distance10.png?raw=true)
### Step 9.4: Click the Scatter Button
**Click** the *Scatter* button at the bottom of the window.
> **Note**: If you do not see the *Scatter button* **scroll down** in the *extension window* or **expand** the *extension window* using the right corner.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
Your scene should look similar to this after clicking the *Scatter button*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/marbleScattered.png?raw=true)
## Step 10: Watch the Scene Play
The play button is used for more than playing animations or movies. We can also use the play button to simulate physics.
### Step 10.1: Hit the Play Button
With the marbles scattered we can watch it in action. **Click** the *Play button* to watch the scene.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/playbutton.png?raw=true)
What happens when we press play:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/playbuttonaction.gif?raw=true)
> **Note:** To reset the scene **click** the *Stop button*.
### Step 10.2: Select a Different Prim
**Select** a different Prim in the *Stage*. It could be another marble, the jar, bowl, etc.
We recommend using any of these Prims:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/primstoselect.png?raw=true)
### Step 10.3: Copy Selected Prim to Scatter Window
With the Prim selected, **Click** the *S button* to copy the Prim Path into the *Scatter Extension Window*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
### Step 10.4: Change Scatter Parameters
**Change** some of the parameters in the *Scatter Window*. I.e. In *Y Axis* **change** *Object Count* to 20 and *Distance* to 5.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/params.png?raw=true)
### Step 10.5: Scatter New Prims
**Click** the *Scatter button* at the bottom of the *Scatter Window*.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
### Step 10.6: Hit the Play Button
**Click** the *Play button* and watch the scene play out.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/playbutton.png?raw=true)
Try to Scatter many items in the scene and play around with the extension.
## Challenge Step 11: Scale Scatter Prims based on Provided Scale
You will notice that there is a *Scale* option. However, this does not work. Try to get it working. Expand the *Hint* section if you get stuck.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scale.png?raw=true)
> **Tip:** Look into `window.py` to see where the value gets used.
<details>
<summary>Hint</summary>
### Challenge Step 11.1: Locate `duplicate_prims()` in `window.py`
**Find** `duplicate_prims()` in `window.py`.
``` python
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string
)
```
`duplicate_prims()` will take all of the transforms and, depending on the mode selected, duplicates the selected prim. This is ideal for adding in a scale parameter.
### Challenge Step 11.2: Pass Scale values in `duplicate_prims()`
`self._scale_models` holds each scale set in the *Scatter Window*. **Add** `scale=[self._scale_models[0].as_float, self._scale_models[1].as_float, self._scale_models[2].as_float]` in `duplicate_prims()`.
``` python
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string,
scale=[self._scale_models[0].as_float, self._scale_models[1].as_float, self._scale_models[2].as_float]
)
```
### Challenge Step 11.3: Locate `duplicate_prims()` in `utils.py`
**Open** `utils.py` from `ext/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > utils.py`. **Locate** `duplicate_prims()`.
``` python
def duplicate_prims(transforms: List = [], prim_names: List[str] = [], target_path: str = "", mode: str = "Copy"):
```
### Challenge Step 11.4: Add new parameter to `duplicate_prims()`
**Add** `scale: List[float] = [1,1,1]` as a parameter for `duplicate_prims()`.
``` python
def duplicate_prims(transforms: List = [], prim_names: List[str] = [], target_path: str = "", mode: str = "Copy", scale: List[float] = [1,1,1]):
```
### Challenge Step 11.5: Pass Scale Parameter into Kit Command
**Scroll down** to find `omni.kit.commands.execute("TransformPrimSRT", path=path_to, new_translation=new_transform)`
**Add** `new_scale=scale` to Kit Command.
``` python
omni.kit.commands.execute("TransformPrimSRT", path=path_to, new_translation=new_transform, new_scale=scale)
```
### Challenge Step 11.6: Save and Test
**Save** the files and try to Scatter Prims with a different scale.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterscale.gif?raw=true)
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterscale.gif?raw=true)
</details>
## Congratulations!
You have completed this workshop! We hope you have enjoyed learning and playing with Omniverse!
[Join us on Discord to extend the conversation!](https://discord.com/invite/nvidiaomniverse)
| 29,675 | Markdown | 38.515313 | 340 | 0.71171 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.scene_auth_scatter/workshop/CN_SceneLayout_Workshop.md |
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/logo.png?raw=true)
# NVIDIA OMNIVERSE
# 通过 NVIDIA Omniverse 轻松开发高级 3D 设计工具
了解如何仅借助数行 Python 脚本,通过模块化 Omniverse 平台轻松创建您自己的自定义场景布局工具。在本课程中,您将在 Omniverse 中使用 Python 构建您的自定义场景布局。
# 学习目标
- 了解如何启用扩展功能
- 使用命令执行
- 创建一个从基元的原点散布对象的功能
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1-zh/3DLayoutToolsIntro_CN_v1.mp4" type="video/mp4">
</video>
# 第 I 部分 打开 Stage,并从社区(或第三方)获取扩展功能
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1-zh/3DLayoutToolsSection1_CN_v1.mp4" type="video/mp4">
</video>
## 第 1 步:打开 Workshop Stage
### <b>第 1.1 步:从下面提供的链接下载 Stage </b>
[下载 Stage](https://dli-lms.s3.amazonaws.com/assets/x-ov-05-v1/Stage.zip)
### <b>第 1.2 步:使用 “Extract All...”(提取所有文件...)解压 Stage </b>
此操作会创建一个名为 `Stage` 的解压文件夹。
### <b>第 1.3 步:在 Omniverse 中打开 Stage </b>
在 Omniverse Code 的 `Content`(内容)选项卡中,找到系统中存放 Stage 文件的位置。
(即 C:/Users/yourName/Downloads/Stage)
在 Omniverse Code 控制台底部的 `Content` (内容)选项卡中,**双击**中间窗格中的 `Stage.usd`,即可在视图区中打开该 Stage。
## 第 2 步:添加扩展功能
我们将从“Extension Manager”(扩展功能管理器)的`Community/Third Party`(社区/第三方)部分获取扩展功能。此外,在`NVIDIA`部分可以获取 NVIDIA 开发的其他扩展功能。
### 第 2.1 步:打开 `Extensions`(扩展功能)选项卡
单击 `Extensions`(扩展功能)管理器选项卡
### 第 2.2 步:对来自社区或第三方的扩展功能进行筛选
选择 `Community/Third Party`(社区/第三方)选项卡
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/filtercommunity.png?raw=true)
在`Community/Third Party`(社区/第三方)部分,您可以找到由社区中的其他开发者提供的扩展功能。
### 第 2.3 步:搜索 Scatter(散布)工具
在搜索框中搜索“scatter”,然后**单击**副标题为 *omni.example.scene_auth_scatter* 的扩展功能。
> **注意:**可以找到两个不同的 Scatter 工具。请仔细核对,确保您安装的 Scatter 工具的副标题为:*omni.example.scene_auth_scatter*。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/communitysearch.png?raw=true)
### 第 2.4 步:安装/启动扩展程序
**单击`Install`(安装)按钮,下载该扩展功能。如果您已经下载了该扩展功能,请单击`Disable`(禁用)旁边的切换按钮。**
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/installext.png?raw=true)
## 第 3 步:使用扩展功能
启用扩展功能后,请尝试执行以下步骤。
### 第 3.1 步:选择一个 Prim(基元)
在 Stage 中选择一个 prim。
> **注意:Prim(基元)是 primitive 的简写,它是 Omniverse 中的基本单元。在 USD 场景中导入或创建的任何对象,都是一个基元,例如镜头、声音、光线、网格等等。从技术角度看,Prim 是元数据、属性和其他基元的容器。您可以在官方文档中了解有关 USD 基元的更多的信息。**
我们建议使用下面圈出的任意基元:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/primstoselect.png?raw=true)
### 第 3.2 步:在 `Scatter Window`(散布窗口)中设置基元的路径
选择好基元后,**单击**`Scatter Window`(散布窗口)中的`S`按钮。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
### 第 3.3 步:使用选定的基元执行散布操作
在`Scatter Window`(散布窗口)底部,**单击** `Scatter`(散布)按钮。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
### 第 3.4 步:撤消散布操作
在 `Stage` 选项卡下面,找到 `Scatter01` 文件夹并左键单击该文件夹,然后单击鼠标右键选择“Delete”(删除)或按下键盘上的 `delete` 按钮,即可将其删除。
在 `Stage` 面板中,您可以看到当前 `USD` (Universal Scene Description) 中的所有素材。它会按层次顺序列出基元。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_3/exts/omni.example.ui_scatter_tool/Workshop/images/deletescatter.gif?raw=true)
## 第 4 步(自我挑战):您还可以使用散布扩展功能执行哪些其他操作?
下面是*可选的*自我挑战。
### 第 4.1 步(自我挑战):用例
通过 5 个用例了解如何进一步使用散布扩展功能。
### 第 4.2 步(自我挑战):一次对多个基元执行散布操作
尝试一次对多个大理石基元执行散布操作。
<details>
<summary>提示</summary>
#### 第 4.2.1 步(自我挑战):一次对多个基元执行散布操作
在`Stage`里,**按住** `Ctrl` 键**选择**多个基元。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/multiprim.png?raw=true)
#### 第 4.2.2 步(自我挑战):一次对多个基元执行散布操作
**重复**第 `3.2` 步和第 `2.3` 步。
</details>
# 第 II 部分 相对于源基元的散布操作
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1-zh/3DLayoutToolsSection2Intro_CN_v1.mp4" type="video/mp4">
</video>
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1-zh/3DLayoutToolsWorkshop2_CN_v1.mp4" type="video/mp4">
</video>
## 第 5 步:更改散布功能,以支持任意给定的原点
要将散布功能用于任意原点,我们需要对其进行修改,使其能够接收位置数据。当前的散布工具将散布功能设置在 Stage 的原点 (0,0,0)。要是有一些基元位于距原点很远的位置,就会非常不便。
### 第 5.1 步:在 Visual Studio Code 中打开扩展功能
从 `Scatter Extension`(散布扩展功能)中,**单击** `Visual Studio` 图标。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/open_vs.png?raw=true)
系统将打开一个新的 *Visual Studio Code* 实例。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/vs_code.png?raw=true)
### 第 5.2 步:打开 `scatter.py`
在 `exts/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > scatter.py` 中,**找到**并**打开** `scatter.py`。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterpy.png?raw=true)
### 第 5.3 步:向 `scatter()` 添加新原点参数
**将** `source_prim_location: List[float] = (0,0,0)` 作为参数,添加到 `scatter()`
``` python
def scatter(
count: List[int],
distance: List[float],
randomization: List[float],
id_count: int = 1,
seed: Optional[int] = None,
source_prim_location: List[float] = (0,0,0)
):
```
`source_prim_location` 将包含我们选定执行散布操作的prim(基元)的 x 轴、y 轴和 z 轴坐标。
### 第 5.4 步:找到 `result.SetTranslate`
在 `scatter.py` 的底部,**找到**如下代码片段。
``` python
result.SetTranslate(
Gf.Vec3d(
x,
y,
z,
)
)
```
`Vec3d` 会创建一个三维向量。每个prim(基元)的位置都是通过上面这段代码生成的。
### 第 5.5 步:计算新原点
在 `Vec3d` 创建过程中,将 `source_prim_location` 中存储的各个坐标值**添加**到生成的坐标,例如:`x` 应改为 `source_prim_location[0] + x`。
``` python
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + x,
source_prim_location[1] + y,
source_prim_location[2] + z,
)
)
```
`scatter()` 应类似下面的示例代码:
``` python
def scatter(
count: List[int],
distance: List[float],
randomization: List[float],
id_count: int = 1,
seed: Optional[int] = None,
source_prim_location: List[float] = (0,0,0)
):
"""
使用包含变换矩阵和 ID 的数据对返回生成器,
以排列多个对象。
### 参数:
`count: List[int]`
每个坐标轴上要生成的矩阵的数量
`distance: List[float]`
每个坐标轴上各个对象之间的距离
`randomization: List[float]`
每个坐标轴的随机距离
`id_count: int`
不同 ID 的数量
`seed: int`
如果不设置 seed 或将 seed 设置为“None”,则使用当前系统时间。如果将 seed
设置为 int 类型,则直接使用它。
"""
# 初始化随机数字生成器。
random.seed(seed)
for i in range(count[0]):
x = (i - 0.5 * (count[0] - 1)) * distance[0]
for j in range(count[1]):
y = (j - 0.5 * (count[1] - 1)) * distance[1]
for k in range(count[2]):
z = (k - 0.5 * (count[2] - 1)) * distance[2]
# 创建矩阵
result = Gf.Matrix4d(1)
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + x,
source_prim_location[1] + y,
source_prim_location[2] + z,
)
)
id = int(random.random() * id_count)
yield (result, id)
```
### 第 5.6 步:保存 `scatter.py` 并打开 `window.py`
**保存** `scatter.py`,然后从 `ext/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > window.py` **打开** `window.py`。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/windowpy.png?raw=true)
### 第 5.7 步:添加 Gf 模块
在导入代码部分,在 `LABEL_WIDTH` 的上方,**添加**一行代码:`from pxr import Gf`。
``` python
import omni.ui as ui
from .style import scatter_window_style
from .utils import get_selection
from .combo_box_model import ComboBoxModel
from .scatter import scatter
from .utils import duplicate_prims
from pxr import Gf
LABEL_WIDTH = 120
SPACING = 4
```
### 第 5.8 步:找到 `transforms`
**找到** `_on_scatter()` 中声明 `transforms` 的位置。
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int
)
```
### 第 5.9 步:硬编码原点位置
在 `seed=self._scatter_seed_model.as_int` 后面,**添加** `source_prim_location=Gf.Vec3d((0,0,-500))`。
> **注意:**请别忘记在 `seed=self._scatter_seed_model.as_int` 后面添加逗号。
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
source_prim_location=Gf.Vec3d((0,0,-500))
)
```
`Gf.Vec3d((0,0,-500))` 会创建一个三坐标向量,坐标值为 x = 0、y = 0、z = -500。由于 Y 值代表stage上下方向的坐标,所以 X 值和 Z 值相当于位于地面上。通过将 Z 值设置为 -500,即可将散布位置设置到沿 Z 轴移动 -500 单位的位置。
### 第 5.10 步:在*Stage*中选择一块大理石
**保存** `window.py`,然后回到 *Omniverse Code*。转到*Stage*部分,**展开**“Marbles”(大理石),然后**选择**任意大理石素材。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/marbleselect.png?raw=true)
### 第 5.11 步:在散布扩展中设置所选大理石的路径
选择好大理石素材后,**单击**`Scatter Window`(散布窗口)中的`S`按钮。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
### 第 5.12 步:对大理石执行散布操作
**滚动**到扩展功能的底部,然后**单击**`Scatter`(散布)按钮。
> **注意:**如果看不到`Scatter`(散布)按钮,请在*扩展程序窗口*中**向下滚动**,或者拉动右下角**扩大***扩展程序窗口*。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
注意观察大理石如何在Stage的右侧进行散布。该位置也就是 Z 轴上 -500 单位的位置。尝试更改 Y 轴和/或 X 轴的某些值,看看大理石会改在哪里进行散布。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/sidescatter.png?raw=true)
## 第 6 步:获取源 prim 的位置
在这一步中,我们将更改对 prim 执行散布操作的原点。首先,我们需要获取源 prim 的位置。
### 第 6.1 步:打开 `window.py`
从 `ext/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > window.py` **打开** `window.py`
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/windowpy.png?raw=true)
### 第 6.2 步:添加 `omni.usd` 模块
在 `import omni.ui as ui` 下面,**添加**一行代码:`import omni.usd`
``` python
# 导入 omni.usd 模型
import omni.usd
```
此时,代码顶部应包含以下内容:
``` python
import omni.ui as ui
import omni.usd
from .style import scatter_window_style
from .utils import get_selection
from .combo_box_model import ComboBoxModel
from .scatter import scatter
from .utils import duplicate_prims
from pxr import Gf
```
`omni.usd` 模块是核心 API 之一,通过它可以实现对 USD 和与 USD 相关的应用服务的访问。
### 第 6.3 步:找到 `_on_scatter()`
**向下滚动**,找到`_on_scatter()`,然后在 `transforms` 的变量声明代码前**添加**一行新代码。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/newline.png?raw=true)
`_on_scatter()` 会在用户按下扩展功能窗口中的`Scatter`按钮时调用。
### 第 6.4 步:获取 USD 上下文
在新添加的行下面,**声明** `usd_context`。请确保此行代码与 `if not prim_names:` 代码行齐头缩进。
``` python
usd_context = omni.usd.get_context()
```
完成添加后,您的代码应类似于如下示例:
``` python
def _on_scatter(self):
"""当用户按下“"Scatter"”(散布)按钮时调用"""
prim_names = [i.strip() for i in self._source_prim_model.as_string.split(",")]
if not prim_names:
prim_names = get_selection()
if not prim_names:
# 待办事项:添加 “Can't clone”(无法复制)消息
pass
# 获取要锚定的 UsdContext
usd_context = omni.usd.get_context()
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
)
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string,
)
```
### 第 6.5 步:获取 Stage
在 `usd_context` 声明下,**添加** `stage = usd_context.get_stage()`。
``` python
# 存储要锚定的 UsdContext
usd_context = omni.usd.get_context()
# 从当前 UsdContext 获取 Stage
stage = usd_context.get_stage()
```
变量stage将使用 USD 获取当前的 Stage。Stage 的层次结构中嵌着多个 prims。
### 第 6.6 步:从Stage获取源 Prim
在下一行中,**添加** `prim = stage.GetPrimAtPath(self._source_prim_model.as_string)`。
``` python
# 存储要锚定的 UsdContext
usd_context = omni.usd.get_context()
# 从当前 UsdContext 获取stage
stage = usd_context.get_stage()
# 将当前扩展功能中被引用的 Prim保存起来
prim = stage.GetPrimAtPath(self._source_prim_model.as_string)
```
### 第 6.7 步:获取源Prim的转换数据
接下来,我们需要添加 `position = prim.GetAttribute('xformOp:translate').Get()`,以**存储**prim的位置数据。检查了下面的示例代码后,请**保存** `window.py`。
``` python
# 存储要锚定的 UsdContext
usd_context = omni.usd.get_context()
# 从当前 UsdContext 获取stage
stage = usd_context.get_stage()
# 存储扩展程序中当前引用的prim(基元)
prim = stage.GetPrimAtPath(self._source_prim_model.as_string)
# 获取焦点基元的位置数据
position = prim.GetAttribute('xformOp:translate').Get()
```
为了获得prim的位置,我们需要获取 Xform 中存储的转换值,从而得到选定的prim的 X 轴、Y 轴和 Z 轴坐标。
> **注意:**转换参数 (Xform) 是 Omniverse 中的所有对象的基本元素,决定了对象的位置。
检查您的代码,应该像下面的示例:
``` python
def _on_scatter(self):
"""当用户按下“"Scatter"”(散布)按钮时调用"""
prim_names = [i.strip() for i in self._source_prim_model.as_string.split(",")]
if not prim_names:
prim_names = get_selection()
if not prim_names:
# 待办事项:添加 “Can't clone”(无法复制)消息
pass
# 存储要锚定的 UsdContext
usd_context = omni.usd.get_context()
# 从当前 UsdContext 获取stage
stage = usd_context.get_stage()
# 保存扩展功能当前所引用的prim
prim = stage.GetPrimAtPath(self._source_prim_model.as_string)
# 获取聚焦的prim的位置数据
position = prim.GetAttribute('xformOp:translate').Get()
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
            source_prim_location=Gf.Vec3d((0,0,-500))
)
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string,
)
```
## 第 7 步:使用选定的 Prim(基元)的位置作为散布原点
更新散布功能后,我们就可以传递前面计算的源prim的位置值了。
### 第 7.1 步:打开 window.py
**打开** `window.py`,在 `_on_scatter()` 中找到声明 `transforms` 的位置。
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int
)
```
### 第 7.2 步:将位置值传递到 `scatter()`
在 `seed=self._scatter_seed_model.as_int` 后面,将代码 `source_prim_location=Gf.Vec3d((0,0,-500))` **替换为** `source_prim_location=position`。
``` python
transforms = scatter(
count=[m.as_int for m in self._scatter_count_models],
distance=[m.as_float for m in self._scatter_distance_models],
randomization=[m.as_float for m in self._scatter_random_models],
id_count=len(prim_names),
seed=self._scatter_seed_model.as_int,
source_prim_location=position
)
```
### 第 7.3 步:进行测试
保存 `window.py` 并返回到 Omniverse。在stage中对某个prim执行**散布**操作。然后,再对另一个prim执行**散布**操作。注意观察它们如何仅在相应prim的位置处进行散布。
### 第 7.4 步:点击 `Play`(播放)
运行您的stage!
## 第 8.1 步(自我挑战):为散布扩展功能添加随机化功能
现在,按下`Scatter`(散布)按钮后,对象会均匀地散布到stage中。请尝试更改代码,以实现随机分布。如果您找不到思路,请展开*提示*部分。
> **提示:**使用 `random.random()`。
<details>
<summary>提示</summary>
### 第 8.1.1 步(自我挑战):打开 `scatter.py`
**打开** `scatter.py`,并*找到* `scatter()`。
### 第 8.1.2 步(自我挑战):添加随机值
**找到**用于生成 Vec3d 的 `result.SetTranslate()` 代码。将传递的第一个参数**修改**为 `source_prim_location[0] + (x + random.random() * randomization[0]),`。
``` python
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + (x + random.random() * randomization[0]),
source_prim_location[1] + y,
source_prim_location[2] + z,
)
)
```
`randomization[0]` 指的是 Scatter 扩展功能窗口中标记为 Random 的选项。
### 第 8.1.3 步(自我挑战):同样修改 Y 值和 Z 值
按照上一步中的操作,对传递到 *Vec3d* 构造的 Y 值和 Z 值进行相同的**修改**。
``` python
result.SetTranslate(
Gf.Vec3d(
source_prim_location[0] + (x + random.random() * randomization[0]),
source_prim_location[1] + (y + random.random() * randomization[1]),
source_prim_location[2] + (z + random.random() * randomization[2]),
)
)
```
### 第 8.1.4 步(自我挑战):更改随机值
**保存** `scatter.py`,然后**返回到** Omniverse。**修改**“*Scatter Window*”(散布窗口)中的“*Random*”(随机)参数。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/random.png?raw=true)
### 第 8.1.5 步(自我挑战):对多个基元执行散布操作
**单击**`Scatter`(散布)按钮,并查看基元的散布情况。
> **注意:**如果散布范围很小,请增大“Random”(随机)值。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/randomscatter.png?raw=true)
</details>
# 第 III 部分 散布物体
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-02-v1-zh/3DLayoutToolsWorkshop3_CN_v1.mp4" type="video/mp4">
</video>
## 第 9 步:对一个大理石 Prim(基元) 执行散布操作
Stage 中包含多个大理石 prims(基元),可用于四处散布。
### 第 9.1 步:在 *Stage* 中选择一个大理石基元
转到 `Stage` 部分,**展开** “Marbles”(大理石),然后**选择**任意大理石基元。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/marbleselect.png?raw=true)
### 第 9.2 步:将所选大理石的路径复制到散布扩展功能
选择好大理石素材后,**单击** `Scatter Window` (散布窗口)中的 `S` 按钮。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
### 第 9.3 步:更改 X 轴的距离值
将`X Axis`(X 轴)的 `Distance`(距离)值**更改**为 10
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/distance10.png?raw=true)
### 第 9.4 步:单击`Scatter`(散布)按钮
**单击**窗口底部的 `Scatter`(散布)按钮。
> **注意**:如果看不到 `Scatter`(散布)按钮,请在 `Extentions` 窗口中向下滚动,或者拉动右下角**扩大** `Extentions`窗口。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
单击 `Scatter`(散布)按钮后,您的 stage 应该与下面的示例类似。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/marbleScattered.png?raw=true)
## 第 10 步:观看 Stage 里的动画
`Play`(播放)按钮的作用不仅是播放动画或影片,我们也可以用它进行物理学仿真。
### 第 10.1 步:单击 `Play`(播放)按钮
对大理石基元设置了散布功能后,我们可以观看散布的动画效果。**单击** `Play`(播放)按钮观看 stage。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/playbutton.png?raw=true)
按下 `Play`(播放)按钮后的效果:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/playbuttonaction.gif?raw=true)
> **注意**:要**重置** stage,请**单击** `Stop`(停止)按钮。
### 第 10.2 步:选择其他的 Prims(基元)
在 `Stage` 选项下,**选择**另一个 prim(基元)。您可以选择另一个大理石 prim(基元),也可以选择瓶、碗或其他 prim(基元)。
我们建议使用下面圈出的任意 prim:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/primstoselect.png?raw=true)
### 第 10.3 步:将选定的 Prim 复制到散布窗口
选好 prim 后,**单击** `S` 按钮,将 prim 的路径复制到 `Scatter` 窗口里。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/clickS.png?raw=true)
### 第 10.4 步:更改散布参数
**更改** `Scatter Window`(散布窗口)中的某些参数。例如:在 `Y Axis`(Y 轴)部分,分别将 `Object Count`(对象数量)和 `Distance` (距离)的值**更改**为 20 和 5。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/params.png?raw=true)
### 第 10.5 步:对新 Prim(基元)执行散布操作
**单击** `Scatter Window`(散布窗口)底部的 `Scatter`(散布)按钮。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterbutton.png?raw=true)
### 第 10.6 步:单击 `Play`(播放)按钮
**单击** `Play`(播放)按钮,并观看Stage的动画效果。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/playbutton.png?raw=true)
尝试使用散布扩展功能在 stage 中散布多种物品,并进行播放。
## 第 11 步(自我挑战):按照给定的缩放倍数对散布的 Prim 进行缩放
您可能注意到,窗口中有一个 `Scale`(缩放)选项。但是,这个选项未发挥任何作用。我们来试着让它派上用场。如果您找不到思路,请展开*提示*部分。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scale.png?raw=true)
> **提醒:**可以查看 `window.py`,看看这个值是在哪里使用的。
<details>
<summary>提示</summary>
### 第 11.1 步(自我挑战):在 `window.py` 中,找到 `duplicate_prims()`
在 `window.py` 中,**找到** `duplicate_prims()`。
``` python
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string
)
```
`duplicate_prims()` 会接收所有转换参数,并根据选定的模式复制选定的基元。它非常适合添加到 scale 参数中。
### 第 11.2 步(自我挑战):在 `duplicate_prims()` 中传递范围值
`self._scale_models` 储存了“*Scatter Window*”(散布窗口)中的每一项范围设置。在 `duplicate_prims()` 中,**添加** `scale=[self._scale_models[0].as_float, self._scale_models[1].as_float, self._scale_models[2].as_float]`。
``` python
duplicate_prims(
transforms=transforms,
prim_names=prim_names,
target_path=self._scatter_prim_model.as_string,
mode=self._scatter_type_model.get_current_item().as_string,
scale=[self._scale_models[0].as_float, self._scale_models[1].as_float, self._scale_models[2].as_float]
)
```
### 第 11.3 步(自我挑战):在 `utils.py` 中,找到 `duplicate_prims()`
从 `ext/omni.example.scene_auth_scatter > omni/example/ui_scatter_tool > utils.py` **打开** `utils.py`。**找到** `duplicate_prims()`。
``` python
def duplicate_prims(transforms: List = [], prim_names: List[str] = [], target_path: str = "", mode: str = "Copy"):
```
### 第 11.4 步(自我挑战):向 `duplicate_prims()` 添加新参数
向 `duplicate_prims()` **添加**新参数 `scale: List[float] = [1,1,1]`。
``` python
def duplicate_prims(transforms: List = [], prim_names: List[str] = [], target_path: str = "", mode: str = "Copy", scale: List[float] = [1,1,1]):
```
### 第 11.5 步(自我挑战):将缩放倍数参数传递到 Kit Command
**向下滚动**,找到 `omni.kit.commands.execute("TransformPrimSRT", path=path_to, new_translation=new_transform)`。
将 `new_scale=scale` **添加**到 Kit Command。
``` python
omni.kit.commands.execute("TransformPrimSRT", path=path_to, new_translation=new_transform, new_scale=scale)
```
### 第 11.6 步(自我挑战):保存并测试
**保存**文件,然后尝试使用不同的缩放值对基元执行散布操作。
![](images/scatterscale.gif)
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_1/exts/omni.example.scene_auth_scatter/workshop/images/scatterscale.gif?raw=true)
</details>
## 恭喜!
您已完成本培训!希望您在学习和使用 Omniverse 的过程中找到乐趣!
[欢迎在 Discord 上加入我们,进行更深入的交流!](https://discord.com/invite/nvidiaomniverse)
| 23,799 | Markdown | 30.903485 | 194 | 0.690449 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.ui_scene.manipulator_tool/Workshop/CN_SceneManipulator_Workshop.md |
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/logo.png?raw=true)
# NVIDIA OMNIVERSE
# 通过 NVIDIA Omniverse 构建自定义 3D 场景操作工具
了解如何在易于扩展的模块化的 Omniverse 平台上构建高级工具。Omniverse 开发者团队将向您展示如何扩展并增强您所熟悉且喜爱的 3D 工具。
# 学习目标
- 启用扩展程序
- 将 `scale` 函数附加至滑块小组件上
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-06-v1-zh/sceneManipulatorIntro_CN_v1.mp4" type="video/mp4">
</video>
# UI Scene_Widget Info
## 第 I 部分
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-06-v1-zh/sceneManipulator1_CN_v1.mp4" type="video/mp4">
</video>
### 第 1 步:打开 Workshop 场景
#### <b>第 1.1 步:从下面提供的链接下载 Stage</b>
[Stage 下载链接](https://dli-lms.s3.amazonaws.com/assets/x-ov-05-v1/Stage.zip)
#### <b>第 1.2 步:使用“Extract All...”(提取所有文件...)选项解压 Stage 文件
此操作会创建一个名为 `Stage` 的解压文件夹。
#### <b>第 1.3 步:在 Omniverse 中打开 Stage
在 Omniverse Code 的 `Content` 选项卡,找到系统中存放 Stage 文件的位置。
(即 C:/Users/yourName/Downloads/Stage)
在 Omniverse Code 控制台底部的 `Content` 选项卡中,**双击**中间窗格中的 `Stage.usd` 即可在视图(Viewport)中打开该 Stage。
### 第 2 步:安装小组件扩展功能
#### <b>第 2.1 步:打开`Extensions`(扩展功能)选项卡</b>
单击 `Extensions`(扩展功能)管理器选项卡。
#### <b>第 2.2 步:对社区/第三方的扩展功能进行筛选</b>
选择 `Community/Third Party`(社区/第三方)选项卡
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/extensionCommunity.PNG?raw=true)
<br>
#### <b>第 2.3 步:搜索小组件信息</b>
搜索 `Widget Info` 并单击 `Omni UI Scene Object Info With Widget Example`
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/widgetExt.png?raw=true)
#### <b>第 2.4 步:安装/启用扩展程序</b>
单击选中的扩展程序,然后在右侧控制台中单击 `Install`(安装)。安装后,启用扩展程序。
><span>❗</span>您可能会收到一个警告,指明此扩展程序未经验证。您可以安全地安装此扩展程序。
<br>
#### <b>第 2.5 步:检查小组件是否起作用</b>
前往 `Viewport`(视图),然后在层次结构中选择一个 `prim`(基元)。
`prim` 是“primitive”(基元)的缩写。基元是 Omniverse 中的基本单元。在 `USD`(Universal Scene Description) 场景中导入或创建的任何对象都是一个基元。这包括:镜头、声音、光线、网格等等。
您会在视图中的 `prim` 上方看到以下小组件:
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/viewportWidgetEnabled.PNG?raw=true)
<br>
><span>❓</span> 您注意到了吗?
>- 基元的路径显示在小组件中。
>- 小组件中有一个缩放滑块,但它不起作用!我们将在下一部分中修复此问题。
<br>
#### <b>第 3 步:找到播放按钮</b>
在视口中找到 `Play`(按钮),并看看单击它时会发生什么!别忘了在完成后按 `Stop`(停止)按钮。
<details>
<summary>单击此处可查看按钮所在的位置</summary>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/playButton.png?raw=true)
</details>
<br>
>#### <span>🧠</span><b>第 4 步(自我挑战):头脑风暴用例</b>
><i>本培训中的所有挑战都是可选的。</i>
>
>思考小组件的 3 种使用方式。例如,您注意到它可用来显示 prim 的路径,那么您还可以在小组件中显示 prim 的其它信息吗?与同行进行头脑风暴,并思考如何将小组件应用于您所从事的行业!稍后我们将就此进行小组讨论。
<br>
<br>
>### <span>⛔</span>建议在此处停留,思考一下,再继续学习第 II 部分
<br>
## 第 II 部分
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-06-v1-zh/sceneManipulator2Intro_CN_v1.mp4" type="video/mp4">
</video>
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-06-v1-zh/sceneManipulator2_CN_v1.mp4" type="video/mp4">
</video>
### 第 5 步:找到您的工作文件
#### <b>第 5.1 步:打开 Visual Studio</b>
转至 `Extensions`(扩展功能)选项卡。
单击 `Widget Info`(小组件信息)扩展功能以在右侧打开扩展功能概述。
单击文件夹图标旁边的 `VS Code` 图标:
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/vsCodeIcon.PNG?raw=true)
<br>
系统将弹出单独的 `VS Code` 窗口,如下所示:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/vsCodeopened.png?raw=true)
<br>
#### <b>第 5.2 步:找到操控器脚本</b>
在左列下拉菜单中的以下位置找到此会话所需的文件:
`exts -> omni.example.ui_scene.widget_info\omni\example\ui_scene\widget_info`
您当前位于:
`widget_info_manipulator.py`
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/fileStructLocation.gif?raw=true)
<br>
### 第 6 步:修复损坏的滑块
>#### 第 6.1 步:添加新导入
在脚本顶部找到 `imports`。
添加新导入:
```python
from pxr import Gf
```
现在,导入的库将如下所示:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/newImport.png?raw=true)
<br>
在以下步骤中,您将使用 `Graphics Foundation`(简称 Gf),它是一个包含基础图形类和操作的软件包。
#### <b>第 6.2 步:找到函数 `update_scale`</b>
在脚本底部找到以下函数:
```python
# 更新滑块
def update_scale(prim_name, value):
```
此函数会更新小组件中的滑块。但是,它目前没有任何代码用来更新缩放比例。让我们开始添加所需的代码来实现这一点!
#### <b>第 6.3 步:获取当前场景</b>
在 `update_scale` 函数内部,找到 `print` 调用。
定义`stage` 变量,例如:
```python
stage = self.model.usd_context.get_stage()
```
从 USD 上下文中,我们抓取当前活动的stage,并将其存储到 `stage` 变量中。
`Stage` 是您的 prims 在层次结构中嵌套的地方。
现在,`update_scale` 应如下所示:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/GetStage.png?raw=true)
<br>
><span>❗</span>请确保新的 stage 变量与 print 调用的缩进列是对齐的。否则,请添加或删除制表符(tab键),直到实现对齐。
<br>
#### <b>第 6.4 步:获取已选择的 prim(基元)</b>
接下来,在stage 变量的下面为当前选择的 prim 添加变量:
```python
prim = stage.GetPrimAtPath(self.model._current_path)
```
`update_scale` 现在如下所示:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/getPrim.png?raw=true)
><span>❗</span> 此 prim 变量应与其上方的 stage 和 print 调用保持对齐。
<br>
#### <b>第 6.5 步:更新 `scale`</b>
在下一行中添加新的 `scale` 变量。
在此变量中,您将获得`xform` 的 `scale`(缩放比例)属性,然后设置 `scale` 的 `Vector3` 值,如下所示:
```python
scale = prim.GetAttribute("xformOp:scale")
scale.Set(Gf.Vec3d(value, value, value))
```
现在,您已完成,`update_scale` 函数将如下所示:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/setScale.png?raw=true)
><span>❗</span>`scale` 变量应与其上方的变量保持对齐。
<br>
### 第 7 步:它起作用了吗?
#### <b>第 7.1 步:保存并测试!</b>
保存操控器脚本,并检查缩放滑块在小组件中是否起作用!
><span>❗</span> 保存时,您可能会注意到小组件在视口中消失。这是预期行为,再次单击基元即可显示小组件。
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/scaleWorking.gif?raw=true)
函数 `update_scale` 正在更新您的滑块,在此函数中,您添加了可获取 `stage` 和当前所选择的 `prim`(小组件显示在其上方)的属性,然后在滑块移动时调用 scale 的 Vector3,以在各个方向上改变 prim(基元)的大小。
><span>❗</span>不起作用? 查看 `Console`(控制台)以调试任何错误。
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/Console.png?raw=true)
<br>
>#### 💡 <b>第 8 步(自我挑战):更大的缩放比例</b>
><i>本培训中的所有挑战都是可选的。</i>
>
>您能否更改函数,实现对一个 prim(基元)以大于 1.0 的比例进行缩放?
>
><details>
><summary>单击此处获取答案</summary>
>
>设置 `value` 变量,并将其值乘以一个数字。
>
>例如:
>
>```python
> def update_scale(prim_name, value):
> if value <= 0:
> value = 0.01
> print(f"changing scale of {prim_name}, {value}")
> ## 新的值变量添加在下方
> value = 10*value
> stage = self.model.usd_context.get_stage()
> prim = stage.GetPrimAtPath(self.model._current_path)
> scale = prim.GetAttribute("xformOp:scale")
> scale.Set(Gf.Vec3d(value, value, value))
> if self._slider_model:
> self._slider_subscription = None
> self._slider_model.as_float = 1.0
> self._slider_subscription = self._slider_model.subscribe_value_changed_fn(
> lambda m, p=self.model.get_item("name"): update_scale(p, m.as_float)
> )
>```
>
></details>
<br>
>#### <span>🧠</span><b>第 9 步(自我挑战):您希望使用小组件控制其他哪些属性?</b>
><i>本培训中的所有挑战都是可选的。</i>
>
> 针对您可能要添加到此小组件的其他 3-5 个属性展开头脑风暴。稍后我们将就此进行公开讨论。
<br>
>### <span>⛔</span> 建议在此处停留,思考一下,再继续学习第 III 部分。
<br>
## 第 III 部分:
<video width="560" height="315" controls>
<source src="https://dli-lms.s3.amazonaws.com/assets/x-ov-06-v1-zh/sceneManipulator3_CN_v1.mp4" type="video/mp4">
</video>
### 第 10 步:创建您的场景
#### <b>第 10.1 步:缩放各种物品!</b>
在您的场景中随意选取一个 prim(基元)并缩放它,例如变出非常大的大理石或很小的罐子。
如何打造独特的场景?
><span>⭐</span>请在完成后按 `Play`(播放)按钮!
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/playButton.png?raw=true)
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/section3.gif?raw=true)
<br>
>#### <span>🧠</span><b>第 11 步(自我挑战):在一个轴上缩放</b>
><i>本培训中的所有挑战都是可选的。</i>
>
>您能否更改函数,实现仅在一个坐标轴的方向上对基元进行缩放?
>
><details>
><summary>单击此处获取答案</summary>
>
>对于您不希望进行缩放的轴方向,在 `scale.Set(Gf.Vec3d(value,value,value))` 中将对应该坐标轴的值更改为 1。
>
>例如:
>
>```python
>scale.Set(Gf.Vec3d(value,1,1))
>```
>
>这会将缩放更改为仅在 X 轴上进行,因为 Y 轴和 Z 轴的值保留为 1,而 X 轴会更改。
>
></details>
<br>
>#### <span>🧠</span><b>第 12 步(自我挑战):打开光线操控器</b>
><i>本培训中的所有挑战都是可选的。</i>
>
>打开光线操控器扩展程序,然后单击面光源。
>
>如何使用此工具更改光线强度?
>
><details>
><summary>单击此处获取答案</summary>
>
>在 `Extensions`(扩展功能)选项卡中,在 `Community/Third Party`(社区/第三方)中搜索“Light”(光线),然后安装/启用 `Omni.Ui Scene Sample for Manipulating Select Light` 扩展程序。
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/LightExt.png?raw=true)
>
><br>
>
>在层次结构中选择一个面光源。
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/RectLight.png?raw=true)
>
><br>
>
>使用光标抓取光线工具的边,然后通过向前或向后拖动来更改光线强度。
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/RectIntensity.png?raw=true)
>
></details>
<br>
## 恭喜!
您已完成本培训!希望您喜欢学习和使用 Omniverse!
[欢迎在 Discord 上加入我们,进行更深入的交流!](https://discord.com/invite/nvidiaomniverse)
| 10,032 | Markdown | 23.772839 | 172 | 0.69986 |
NVIDIA-Omniverse/kit-workshop-siggraph2022/exts/omni.example.ui_scene.manipulator_tool/Workshop/Siggraph2022_Manipulator_Tools_Workshop.md |
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/logo.png?raw=true)
# NVIDIA OMNIVERSE
# How to Build Custom 3D Scene Manipulator Tools on NVIDIA Omniverse
See how you can build advanced tools on the modular, easily extensible Omniverse platform. You’ll learn from the Omniverse developer ecosystem team how you can extend and enhance the 3D tools you know and love today.
# Learning Objectives
- Enable Extension
- Attach `scale` function to Slider Widget
<video width="560" height="315" controls>
<source src="https://d36m44n9vdbmda.cloudfront.net/assets/x-ov-06-v1/sceneManipulatorIntro.mp4" type="video/mp4">
</video>
# UI Scene_Widget Info
## Section I
<video width="560" height="315" controls>
<source src="https://d36m44n9vdbmda.cloudfront.net/assets/x-ov-06-v1/sceneManipulator1.mp4" type="video/mp4">
</video>
### Step 1: Open the Workshop Stage
#### <b>Step 1.1: Download the Stage from the Link Provided</b>
[Stage Link](https://dli-lms.s3.amazonaws.com/assets/x-ov-06-v1/Stage.zip)
#### <b> Step 1.2: Unzip Stage Using Extract All...
This creates an unzipped file folder called `Stage`.
#### <b> Step 1.3: Open Stage in Omniverse
Navigate inside Omniverse Code's `Content tab` to the stage file's location on your system.
(i.e. C:/Users/yourName/Downloads/Stage)
**Double Click** `Stage.usd` in the center window pane of the `Content tab` at the bottom of the Omniverse Code Console and it will appear in the viewport.
### Step 2: Install the Widget Extension
#### <b>Step 2.1: Open the Extensions Tab</b>
Click on `Extensions` Manager Tab
#### <b>Step 2.2: Filter by Community / Third Party Extensions</b>
Select `Community / Third Party` tab
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/extensionCommunity.PNG?raw=true)
<br>
#### <b>Step 2.3: Search for Widget Info</b>
Search for `Widget Info` and click on `Omni UI Scene Object Info With Widget Example`
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/widgetExt.png?raw=true)
#### <b>Step 2.4: Install/Enable the Extension</b>
Click on the extension and then click `Install` in the right console. Once installed, enable the extension.
><span>❗</span> You may get a warning that this extension is not verified. It is safe to install this extension.
<br>
#### <b>Step 2.5: Check that the Widget is Working</b>
Navigate to `Viewport` then select a `prim` in the hierarchy.
A `prim` is short for primitive. The prim is the fundamental unit in Omniverse: anything imported or created in a `USD` (Universal Scene Description) scene is a prim. This includes cameras, sounds, lights, meshes, etc.
You should see the following widget appear in the viewport above the `prim`:
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/viewportWidgetEnabled.PNG?raw=true)
<br>
><span>❓</span> Did you notice?
>- The path of the prim is displayed in the widget.
>- There is a scale slider in the widget but it doesn't work! We will fix this in the next section.
<br>
#### <b>Step 3: Find the Play Button</b>
Locate the `Play` button in the viewport and see what happens when you click it! Don't forget to hit the `Stop` button when you are finished.
<details>
<summary>Click here to see where the button is located </summary>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/playButton.png?raw=true)
</details>
<br>
>#### <span>🧠</span><b>Challenge Step 4: Brainstorm Use Cases</b>
><i>All Challenges in this workshop are optional</i>
>
>Think of 3 ways a widget could be used. For example, you noticed that the path of the prim is displayed, what else could you display about the prim in the widget? Brain storm with your peers and think of how it can be used for your industry! We will have a group discussion about this later on.
<br>
<br>
>### <span>⛔</span> Stop here and wait to move on to Section II
<br>
## Section II
<video width="560" height="315" controls>
<source src="https://d36m44n9vdbmda.cloudfront.net/assets/x-ov-06-v1/sceneManipulator2Intro.mp4" type="video/mp4">
</video>
<video width="560" height="315" controls>
<source src="https://d36m44n9vdbmda.cloudfront.net/assets/x-ov-06-v1/sceneManipulator2.mp4" type="video/mp4">
</video>
### Step 5: Find your Work Files
#### <b>Step 5.1: Open Visual Studio</b>
Go to the `Extensions` tab.
Click the `Widget Info` extension to open the extension overview to the right.
Click the `VS Code` icon next to the folder icon:
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/vsCodeIcon.PNG?raw=true)
<br>
`VS Code` will pop up separately and look like this:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/vsCodeopened.png?raw=true)
<br>
#### <b>Step 5.2: Locate Manipulator Script</b>
Locate the files you need for this session in the left column drop-down menus at:
`exts -> omni.example.ui_scene.widget_info\omni\example\ui_scene\widget_info`
You are working in
`widget_info_manipulator.py`
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/fileStructLocation.gif?raw=true)
<br>
### Step 6: Fix the Broken Slider
>#### Step 6.1: Add a New Import
Locate the `imports` at the top of the script.
Add the new import:
```python
from pxr import Gf
```
The imports will now look like this:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/newImport.png?raw=true)
<br>
In the following steps, you will use `Graphics Foundation`, or Gf, which is a package of fundamental graphics types and operations.
#### <b>Step 6.2: Find the Function Update_Scale</b>
Locate the following function at the bottom of the script:
```python
# Update the slider
def update_scale(prim_name, value):
```
This function updates the slider in the Widget. However, it currently does not have any logic to update the scale. Let's start adding the code we need to get that working!
#### <b>Step 6.3: Get the Current Stage</b>
Inside of `update_scale` function, find the `print` call.
Define the `stage` variable underneath this call, like so:
```python
stage = self.model.usd_context.get_stage()
```
From the USD context we grab the active stage and store it into the stage variable.
The `Stage` is where your prims are nested in the hierarchy.
So now, `update_scale` should look like this:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/GetStage.png?raw=true)
<br>
><span>❗</span> Make sure your new stage variable is lined up with the print call. If it is not, add or delete tabs until it is.
<br>
#### <b>Step 6.4: Get the Selected Prim</b>
Next, add a variable underneath the stage variable for the currently selected prim:
```python
prim = stage.GetPrimAtPath(self.model._current_path)
```
`update_scale` will now look like this:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/getPrim.png?raw=true)
><span>❗</span> This prim variable should be lined up with the stage and print call above it.
<br>
#### <b>Step 6.5: Update the Scale </b>
Add a new scale variable on the next line.
In this variable you will get the scale `attribute` of the `xform` and the scale's Vector3 value, like so:
```python
scale = prim.GetAttribute("xformOp:scale")
scale.Set(Gf.Vec3d(value, value, value))
```
Now, your completed `update_scale` function will look like this:
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/setScale.png?raw=true)
><span>❗</span>The scale variable should be lined up with the variables above it.
<br>
### Step 7: Did it work?
#### <b>Step 7.1: Save and Test! </b>
Save your manipulator script and check that the scale slider works in your widget!
><span>❗</span> When you save, you may notice that the widget disappears in the viewport. This is to be expected, click the prim again to show the widget.
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/scaleWorking.gif?raw=true)
Your slider is now being updated by the function `update_scale`. In this function you added logic that grabs the `Stage` and the currently selected `prim` that the widget is displayed on, then sets the scale's Vector3 value whenever the slider is moved, scaling the prim in all directions.
><span>❗</span> Not Working? Check the `Console` to debug any errors.
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/Console.png?raw=true)
<br>
>#### :bell:<b>Challenge Step 8: Scale Larger</b>
><i>All Challenges in this workshop are optional</i>
>
>Can you change the function to scale the prim larger than 1.0?
>
><details>
><summary> Click here for the answer </summary>
>
>Set a `value` variable and multiply a number by value.
>
>For example:
>
>```python
> def update_scale(prim_name, value):
> if value <= 0:
> value = 0.01
> print(f"changing scale of {prim_name}, {value}")
> ## NEW VALUE VARIABLE ADDED BELOW
> value = 10*value
> stage = self.model.usd_context.get_stage()
> prim = stage.GetPrimAtPath(self.model._current_path)
> scale = prim.GetAttribute("xformOp:scale")
> scale.Set(Gf.Vec3d(value, value, value))
> if self._slider_model:
> self._slider_subscription = None
> self._slider_model.as_float = 1.0
> self._slider_subscription = self._slider_model.subscribe_value_changed_fn(
> lambda m, p=self.model.get_item("name"): update_scale(p, m.as_float)
> )
>```
>
></details>
<br>
>#### <span>🧠</span><b>Challenge Step 9: What other properties might you want to control with the widget?</b>
><i>All Challenges in this workshop are optional</i>
>
> Brainstorm 3-5 other properties that you could add to this widget. We will have an open discussion later on.
<br>
>### <span>⛔</span> Stop here and wait to move on to Section III
<br>
## Section III:
<video width="560" height="315" controls>
<source src="https://d36m44n9vdbmda.cloudfront.net/assets/x-ov-06-v1/sceneManipulator3.mp4" type="video/mp4">
</video>
### Step 10: Create your scene
#### <b>Step 10.1: Scale Everything!</b>
Play around in your scene and scale the prims in various sizes, such as a very large marble or a tiny jar.
How can you make your scene unique?
><span>⭐</span> Press the `Play` button when you are finished!
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/playButton.png?raw=true)
<br>
![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/section3.gif?raw=true)
<br>
>#### <span>🧠</span><b>Challenge Step 11: Scale in One Axis</b>
><i>All Challenges in this workshop are optional</i>
>
>Can you change the function to scale the prim in only one axis?
>
><details>
><summary> Click here for the answer </summary>
>
>Change the value's to 1 in `scale.Set(Gf.Vec3d(value,value,value))` of the axes that you do not want to scale in.
>
>For example:
>
>```python
>scale.Set(Gf.Vec3d(value,1,1))
>```
>
>Which, would change the scale in the X axis as the Y and Z axis will remain at a value of 1 and the X axis will change.
>
></details>
<br>
>#### <span>🧠</span><b>Challenge Step 12: Turn on the Light Manipulator</b>
><i>All Challenges in this workshop are optional</i>
>
>Turn on the Light Manipulator Extension and click on the Rect Light.
>
>How can you change the intensity of the light using the tool?
>
><details>
><summary>Click here for the answer</summary>
>
>In the `Extensions` tab, search for Light in the `Community / Third Party` and install/enable the `Omni.Ui Scene Sample for Manipulating Select Light` extension.
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/LightExt.png?raw=true)
>
><br>
>
>Select one of the Rect Lights in the hierarchy.
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/RectLight.png?raw=true)
>
><br>
>
>Use your cursor to grab the edges of the Light tool and change the intensity by dragging forward or backward.
>
>![](https://github.com/NVIDIA-Omniverse/kit-workshop-siggraph2022/blob/workshop_2/exts/omni.example.ui_scene.widget_info/Workshop/images/RectIntensity.png?raw=true)
>
></details>
<br>
## Congratulations!
You have completed this workshop! We hope you have enjoyed learning and playing with Omniverse!
[Join us on Discord to extend the conversation!](https://discord.com/invite/nvidiaomniverse)
| 13,812 | Markdown | 33.5325 | 295 | 0.72437 |
NVIDIA-Omniverse/deep-dive-into-microservices/README.md | # Companion Code to *A Deep Dive into Building Microservices with Omniverse*
This companion code project contains the resources developed during the [*Deep Dive into Building Microservices with Omniverse*](https://www.nvidia.com/en-us/on-demand/session/gtcfall21-a31204/) session presented during GTC November 2021.
While there is no better way to learn than to jump in and try writing your own microservice from the sample template for Kit extensions, we also know that having an example to get started can be a valuable resource. Our hope is that these sample extensions, along with the background information of the GTC session, will fast-forward you on your development journey in Omniverse.
[![Deep Dive into Building Microservices with Omniverse GTC 2021](./docs/deep-dive-into-microservices-with-omniverse-session-poster.jpg)](https://www.nvidia.com/en-us/on-demand/session/gtcfall21-a31204/)
Let us know about what this inspired you to create! [We'd love to hear from you!](https://forums.developer.nvidia.com/c/omniverse/showcase/362)
## About
This project contains the 2 extensions created during the conversation on microservices, which serve as a demonstration of the flexibility of this architecture.
Using the example of a 3D asset pipeline, these extensions illustrate the scalability and reusability of loosely coupled components by:
1. **Creating a microservice to validate conformity of a USD scene.** This illustrates how a studio may scale and distribute automated tasks, allowing creators to focus on doing what they excel at, and relieve them from manual tasks.
2. **Exposing a microservice to convert 3D assets from one format to another.** Independently from the previous microservice, this service can be configured to be as broad or as narrow as necessary to adapt to the needs a studio may have. Separate teams could even reuse and deploy the microservice to serve their unique needs: while artists may need UI integrated into their favorite content creation tools, pipeline developers may also automate the batch import of content shared by partners.
3. **Assembling both services together and recording the status of conversion tasks.** By storing the result of conversion tasks in a database, one can imagine evolving this simple pipeline into an automated system exposing a dashboard where artists could monitor the status of their conversions tasks processing in the background, or adding features such as notifications when a task is completed.
## Configuration
To get started:
1. Download a copy of this repository to your machine.
2. From the [Omniverse Launcher](https://www.nvidia.com/en-us/omniverse), download *Create* and *Farm Queue*.
To load this project in *Create* (or any *Omniverse Kit*-based application such as *View*, *Machinima* or *Isaac Sim*), add a link from this repository to the application using the provided `link_app` script:
**On Windows:**
```batch
link_app.bat C:/Users/<username>/AppData/Local/ov/pkg/create-2021.3.7
```
**On Linux:**
```bash
./link_app.sh ~/.local/share/ov/pkg/create-2021.3.7
```
If the operation completed successfully, an *./app* folder should appear, linking the root of this repository to the install location of the Kit-based application from the Omniverse Launcher.
## Executing the sample
Once configured, this sample project can be executed by launching an instance of Create or any Kit-based application, and submitting a task to the endpoint exposed by the `omni.service.assets.convert` service.
**On Windows:**
```batch
REM Launch Create, with the extension enabled:
app/omni.create.bat ^
--ext-folder C:\Users\<username>\AppData\Local\ov\pkg\farm-queue-102.1.0\exts-farm-queue ^
--ext-folder ./exts ^
--enable omni.services.assets.convert
```
**On Linux:**
```shell
# Launch Create, with the extension enabled:
./app/omni.create.sh \
--ext-folder ~/.local/share/ov/pkg/farm-queue-102.1.0/exts-farm-queue \
--ext-folder ./exts \
--enable omni.services.assets.convert
```
Once the application is launched, a conversion task can be submitted to the service by using the web interface listing all the exposed microservices. This interactive interface is exposed at the following location:
* For *Kit*: http://localhost:8011/docs
* For *Create*: http://localhost:8111/docs
* For *Isaac Sim*: http://localhost:8211/docs
## Additional resources
* [Presentation slides](https://drive.google.com/file/d/1lThTzjQqnGOgVE6GddwKhsl_f4f1vVsm/view?usp=sharing)
* [NVIDIA Omniverse Platform](https://developer.nvidia.com/nvidia-omniverse-platform)
* [NVIDIA Omniverse Developer Resource Center](https://developer.nvidia.com/nvidia-omniverse-developer-resource-center)
* *Getting started* documentation:
* [NVIDIA Omniverse Microservices](https://docs.omniverse.nvidia.com/services)
* [NVIDIA Omniverse Farm](https://docs.omniverse.nvidia.com/farm)
| 4,887 | Markdown | 64.173332 | 495 | 0.777369 |
NVIDIA-Omniverse/deep-dive-into-microservices/tools/scripts/link_app.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import argparse
import os
import packmanapi
if __name__ == "__main__":
    # Ask the user for the location of a Kit-based app installed through the
    # Omniverse Launcher, then link it into this repository as "app/".
    arg_parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher.")
    arg_parser.add_argument(
        "path",
        help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
    )
    parsed = arg_parser.parse_args()

    if os.path.exists(parsed.path):
        # Resolve the directory containing this script so the link target is
        # correct regardless of the current working directory:
        script_root = os.path.dirname(os.path.realpath(__file__))
        packmanapi.link(f"{script_root}/../../app", parsed.path)
    else:
        print(f"Provided path doesn't exist: \"{parsed.path}\"")
| 1,062 | Python | 36.964284 | 124 | 0.711864 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.convert/config/extension.toml | [package]
version = "1.0.0"
title = "Asset conversion service"
description = "A simple demonstration of an asset conversion microservice."
authors = ["Omniverse Kit Team"]
preview_image = "data/preview_image.png"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
repository = ""
category = "Example"
keywords = ["kit", "service", "asset", "conversion", "example"]
[dependencies]
"omni.client" = {}
"omni.kit.asset_converter" = {}
"omni.kit.pip_archive" = {}
"omni.services.assets.validate" = {}
"omni.services.core" = {}
"omni.services.facilities.database.manager" = {}
# The main Python module this extension provides, it will be publicly available as
# "import omni.services.assets.convert":
[[python.module]]
name = "omni.services.assets.convert"
[settings.exts."omni.services.assets.convert"]
# URL prefix where the conversion service will be mounted:
url_prefix = "/assets"
# Database settings, using an SQLite database for demonstration purposes:
[settings.exts."omni.services.assets.convert".dbs.asset-conversions]
connection_string = "sqlite:///${data}/asset-conversions.db"
| 1,095 | TOML | 32.21212 | 82 | 0.73516 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.convert/omni/services/assets/convert/extension.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
import carb
import omni.ext
from omni.services.core import main
from omni.services.facilities.database.manager import DatabaseManagerFacility
from .services import router
class AssetConversionServiceExtension(omni.ext.IExt):
    """Asset conversion extension.

    On startup, this extension:
      * reads the URL prefix for the service from the extension settings,
      * creates a database facility and schedules (asynchronously) the creation
        of the table used to record conversion outcomes,
      * registers the database facility with the service router so endpoints
        can use it, mounts the router, and updates the application's OpenAPI
        metadata.

    On shutdown, the pending database initialization (if any) is cancelled and
    the router is deregistered.
    """

    def on_startup(self, ext_id) -> None:
        # Settings are keyed by the extension name without the version suffix
        # (ext_id has the form "<ext_name>-<version>"):
        ext_name = ext_id.split("-")[0]
        url_prefix = carb.settings.get_settings_interface().get(f"exts/{ext_name}/url_prefix")

        # Setup the database facility:
        self._database_facility = DatabaseManagerFacility(ext_name=ext_name)
        # Keep a handle on the scheduled initialization so it can be cancelled
        # on shutdown if it has not completed yet:
        self._db_ready = asyncio.ensure_future(self._initialize_db())

        # Register the database facility with the router, so it can be used by service endpoints:
        router.register_facility("db_manager", self._database_facility)

        main.register_router(router=router, prefix=url_prefix, tags=["Assets"])
        main.get_app().title = "Omniverse Farm"
        main.get_app().description = "A microservice-based framework for distributed task execution."

        # Describe the "Assets" tag used by this router's endpoints in the
        # generated OpenAPI documentation:
        tags_metadata = {
            "name": "Assets",
            "description": "Manage assets submitted to the Queue."
        }
        if not main.get_app().openapi_tags:
            main.get_app().openapi_tags = []
        main.get_app().openapi_tags.append(tags_metadata)

    def on_shutdown(self) -> None:
        # Cancel the database initialization if it is still pending:
        if self._db_ready:
            self._db_ready.cancel()
            self._db_ready = None
        main.deregister_router(router=router)

    async def _initialize_db(self) -> None:
        """Initialize the database to be used to store asset conversion results."""
        # Create the results table on first run; subsequent runs are no-ops
        # thanks to "IF NOT EXISTS":
        async with self._database_facility.get("asset-conversions") as db:
            table_columns = [
                "id INTEGER PRIMARY KEY AUTOINCREMENT",
                "source_asset VARCHAR(256) NOT NULL",
                "destination_asset VARCHAR(256) NOT NULL",
                "success BOOLEAN NOT NULL",
            ]
            await db.execute(query=f"CREATE TABLE IF NOT EXISTS AssetConversions ({', '.join(table_columns)});")
| 2,511 | Python | 39.516128 | 112 | 0.66826 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.convert/omni/services/assets/convert/services/convert.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Dict

from pydantic import BaseModel, Field

import omni.kit.asset_converter
import omni.usd
from omni.services.client import AsyncClient
from omni.services.core import routers
router = routers.ServiceAPIRouter()
class ConversionRequestModel(BaseModel):
    """Model describing the request to convert a given asset to a different format.

    The ``title``/``description`` metadata of each field is surfaced in the
    OpenAPI schema generated for the service endpoint.
    """
    # Location of the source asset; must be reachable by the Agent that
    # executes the conversion task:
    import_path: str = Field(
        ...,
        title="Path of the source asset to be converted",
        description="Location where the asset to convert can be located by an Agent.",
    )
    # Destination of the converted asset (the target format is presumably
    # inferred from this path's extension — confirm against the
    # omni.kit.asset_converter extension):
    output_path: str = Field(
        ...,
        title="Output path where to store the converted asset",
        description="Location where to place the converted asset.",
    )
    # Free-form settings dictionary forwarded verbatim to the Kit Asset
    # Converter extension; defaults to no overrides:
    converter_settings: Dict = Field(
        {},
        title="Converter settings",
        description="Settings to provide to the Kit Asset Converter extension in order to perform the conversion.",
    )
class ConversionResponseModel(BaseModel):
    """Model describing the response to the request to convert a given USD asset."""
    # Human-readable status of the conversion. The endpoint returns "finished"
    # on success; failures are raised as exceptions instead of being encoded
    # in this field.
    status: str = Field(
        ...,
        title="Conversion status",
        description="Status of the conversion of the given asset.",
    )
@router.post(
    path="/convert",
    summary="Convert assets to a different format",
    description="Convert the given asset into a different format.",
    response_model=ConversionResponseModel,
)
async def run(
    req: ConversionRequestModel,
    db_manager=router.get_facility("db_manager"),
) -> ConversionResponseModel:
    """Convert the asset described by ``req``, validate the result and record the outcome.

    Args:
        req (ConversionRequestModel): Description of the conversion task to perform.
        db_manager: Database facility injected by the router, used to persist
            the outcome of each conversion.

    Returns:
        ConversionResponseModel: Confirmation that the conversion task finished.

    Raises:
        Exception: If the Kit asset converter failed to convert the given asset.
    """
    # NOTE: the original code also carried a second, bare ``@router.post("/convert")``
    # decorator, which registered the same endpoint twice; it has been removed.

    # Convert the given asset using the Kit Asset Converter extension:
    task = omni.kit.asset_converter.get_instance().create_converter_task(
        import_path=req.import_path,
        output_path=req.output_path,
        progress_callback=lambda current, total: print(f"Conversion progress: {current/total*100.0}%"),
        asset_converter_context=req.converter_settings,
    )
    success = await task.wait_until_finished()
    if not success:
        detailed_status_code = task.get_status()
        detailed_status_error_string = task.get_detailed_error()
        raise Exception(f"Failed to convert \"{req.import_path}\". Error: {detailed_status_code}, {detailed_status_error_string}")

    # Execute the validation service exposed by the "omni.services.assets.validate"
    # extension through an in-process ("local://") service call:
    client = AsyncClient("local://")
    validation_result = await client.assets.validate(
        scene_path=req.import_path,
        expected_camera_count=5,
    )

    # Record the result of the validation in the database:
    query = """
        INSERT INTO AssetConversions (source_asset, destination_asset, success)
        VALUES (:source_asset, :destination_asset, :success)
    """
    values = {
        "source_asset": req.import_path,
        "destination_asset": req.output_path,
        "success": 1 if validation_result["success"] else 0,
    }
    async with db_manager.get("asset-conversions") as db:
        await db.execute(query=query, values=values)

    return ConversionResponseModel(status="finished")
| 3,449 | Python | 35.315789 | 130 | 0.696724 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.convert/docs/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.0.0] - 2021-11-10
### Added
- Initial release for GTC November 2021.
| 328 | Markdown | 31.899997 | 168 | 0.728659 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.convert/docs/README.md | # Asset Conversion Service [omni.services.assets.convert]
## About
A simple extension demonstrating writing a microservice to convert assets using Omniverse Kit-based applications.
## Usage
Once enabled, the extension will expose a `/assets/convert` service endpoint, which can be explored from the list of available microservice endpoints exposed by the application:
* For *Kit*: http://localhost:8011/docs
* For *Create*: http://localhost:8111/docs
* For *Isaac Sim*: http://localhost:8211/docs
## Running the extension
To enable and execute the extension, from the root of the repository:
**On Windows:**
```batch
REM Link the extension against a Kit-based application from the Launcher:
link_app.bat C:/Users/<username>/AppData/Local/ov/pkg/create-2021.3.7
REM Launch Create, with the extension enabled:
app/omni.create.bat ^
--ext-folder C:\Users\<username>\AppData\Local\ov\pkg\farm-queue-102.1.0\exts-farm-queue ^
--ext-folder ./exts ^
--enable omni.services.assets.convert
```
**On Linux:**
```shell
# Link the extension against a Kit-based application from the Launcher:
./link_app.sh ~/.local/share/ov/pkg/create-2021.3.7
# Launch Create, with the extension enabled:
./app/omni.create.sh \
--ext-folder ~/.local/share/ov/pkg/farm-queue-102.1.0/exts-farm-queue \
--ext-folder ./exts \
--enable omni.services.assets.convert
```
To launch this small demo pipeline, all that remains is integrating some UI components to let Users submit tasks to the service, or start one from the command-line:
```shell
curl -X 'POST' \
'http://localhost:8011/assets/convert' \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"import_path": "/full/path/to/source_content.usd",
"output_path": "/full/path/to/destination_content.obj",
"converter_settings": {}
}'
```
| 1,836 | Markdown | 32.399999 | 177 | 0.724401 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.validate/config/extension.toml | [package]
version = "1.0.0"
title = "Asset validation service"
description = "A simple demonstration of an asset validation microservice."
authors = ["Omniverse Kit Team"]
preview_image = "data/preview_image.png"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
repository = ""
category = "Example"
keywords = ["kit", "service", "asset", "validation", "example"]
[dependencies]
"omni.services.core" = {}
"omni.usd" = {}
# The main Python module this extension provides, it will be publicly available as
# "import omni.services.assets.validate":
[[python.module]]
name = "omni.services.assets.validate"
[settings.exts."omni.services.assets.validate"]
# URL prefix where the validation service will be mounted:
url_prefix = "/assets"
| 744 | TOML | 28.799999 | 82 | 0.729839 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.validate/omni/services/assets/validate/extension.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni.ext
from omni.services.core import main
from .services import router
class AssetValidationServiceExtension(omni.ext.IExt):
    """Extension exposing the asset validation service endpoint.

    On startup the service router is mounted under a configurable URL prefix
    and the application's OpenAPI metadata is updated; on shutdown the router
    is removed again.
    """

    def on_startup(self, ext_id) -> None:
        """Mount the validation router and populate the OpenAPI metadata."""
        # Settings are stored under the extension name, without the version
        # suffix carried by ext_id ("<ext_name>-<version>"):
        extension_name = ext_id.split("-")[0]
        settings = carb.settings.get_settings_interface()
        mount_prefix = settings.get(f"exts/{extension_name}/url_prefix")

        main.register_router(router=router, prefix=mount_prefix, tags=["Assets"])

        app = main.get_app()
        app.title = "Omniverse Farm"
        app.description = "A microservice-based framework for distributed task execution."

        # Describe the "Assets" tag used by this router's endpoints in the
        # generated OpenAPI documentation:
        if not app.openapi_tags:
            app.openapi_tags = []
        app.openapi_tags.append({
            "name": "Assets",
            "description": "Manage assets submitted to the Queue.",
        })

    def on_shutdown(self) -> None:
        """Unmount the validation router."""
        main.deregister_router(router=router)
| 1,388 | Python | 35.552631 | 101 | 0.693804 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.validate/omni/services/assets/validate/services/validate.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
from typing import List, Optional
from pxr import UsdGeom
from pydantic import BaseModel, Field
import carb
from omni.services.core import routers
import omni.usd
router = routers.ServiceAPIRouter()
class ValidationRequestModel(BaseModel):
    """Model describing the request to validate a given USD stage.

    The ``title``/``description`` metadata of each field is surfaced in the
    OpenAPI schema generated for the service endpoint.
    """
    # Full path to the USD stage to open; must be accessible to the Agent
    # running the validation:
    scene_path: str = Field(
        ...,
        title="USD scene path",
        description="Full path to the USD scene to validate, hosted on a location accessible to the Agent.",
    )
    # Number of cameras the scene is expected to contain; validation succeeds
    # only when the actual count matches this exactly:
    expected_camera_count: int = Field(
        ...,
        title="Expected number of cameras",
        description="Expected number of cameras to find in the scene.",
    )
class ValidationResponsetModel(BaseModel):
    """Model describing the response to the request to validate a given USD stage.

    NOTE(review): the class name contains a typo ("Responset" instead of
    "Response"). It is kept as-is because the name is part of the endpoint's
    public surface (referenced as ``response_model`` by the route decorator).
    """
    # True when the number of cameras found equals the expected count:
    success: bool = Field(
        ...,
        title="Success",
        description="Flag indicating if the validation was successful.",
    )
    # Number of cameras actually found while traversing the stage:
    actual_camera_count: int = Field(
        ...,
        title="Number of cameras found",
        description="Actual number of cameras found in the scene.",
    )
async def load_usd_stage(usd_file: str, stage_load_timeout: Optional[float] = None) -> bool:
    """
    Load the given USD stage into the Kit runtime.

    Args:
        usd_file (str): Location of the stage to open.
        stage_load_timeout (Optional[float]): Maximum duration for which to wait before considering a loading timeout.

    Returns:
        bool: A flag indicating whether or not the given USD stage was successfully loaded.

    Raises:
        Exception: If the stage could not be opened, or if waiting for its assets
            timed out while files were still loading.
    """
    success, error = await omni.usd.get_context().open_stage_async(usd_file)
    if not success:
        carb.log_error(f"Unable to open \"{usd_file}\": {str(error)}")
        raise Exception(f"Unable to open \"{usd_file}\".")

    carb.log_info("Stage opened. Waiting for \"ASSETS_LOADED\" event.")
    usd_context = omni.usd.get_context()
    if usd_context.get_stage_state() != omni.usd.StageState.OPENED:
        while True:
            try:
                event, _ = await asyncio.wait_for(usd_context.next_stage_event_async(), timeout=stage_load_timeout)
                if event == int(omni.usd.StageEventType.ASSETS_LOADED):
                    carb.log_info(f"Assets for \"{usd_file}\" loaded")
                    return True
            except asyncio.TimeoutError:
                # Timed out waiting for the event; check whether all files have
                # finished loading anyway (the event may have been missed):
                _, files_loaded, total_files = usd_context.get_stage_loading_status()
                if files_loaded == total_files:
                    carb.log_warn("Timed out waiting for \"ASSETS_LOADED\" event but all files seem to have loaded.")
                    return False
                raise Exception(f"Timed out waiting for \"ASSETS_LOADED\" event for \"{usd_file}\". Aborting.")
    # Fix: the stage was already fully opened, so there is no event to wait
    # for. Previously this path fell through and implicitly returned None,
    # contradicting the documented bool return type.
    return True
def get_all_stage_cameras() -> List[UsdGeom.Camera]:
    """Collect every camera prim present in the currently opened USD stage.

    Returns:
        List[UsdGeom.Camera]: All camera prims found while traversing the stage.
    """
    stage = omni.usd.get_context().get_stage()
    # Traverse the entire stage hierarchy and keep only the camera prims:
    return [UsdGeom.Camera(prim) for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Camera)]
@router.post(
    path="/validate",
    summary="Validate assets for conformity",
    description="Validate that the USD Stage at the given location conforms to pre-determined validation rules.",
    response_model=ValidationResponsetModel,
)
async def run(req: ValidationRequestModel) -> ValidationResponsetModel:
    """Load the requested USD stage and validate it against the expected camera count."""
    # Bring the stage into the Kit runtime before inspecting it:
    await load_usd_stage(usd_file=req.scene_path)

    # Perform the validation.
    #
    # NOTE: For demonstration purposes, only the number of cameras present in
    # the given USD scene is considered, to demonstrate integration with tools
    # and workflows.
    found_cameras = get_all_stage_cameras()
    found_camera_count = len(found_cameras)

    # Report whether the camera count matched the expectation:
    return ValidationResponsetModel(
        success=found_camera_count == req.expected_camera_count,
        actual_camera_count=found_camera_count,
    )
| 4,606 | Python | 34.167939 | 119 | 0.668693 |
NVIDIA-Omniverse/deep-dive-into-microservices/exts/omni.services.assets.validate/docs/README.md | # Asset Validation Service [omni.services.assets.validate]
## About
A simple extension demonstrating writing a microservice to validate assets using Omniverse Kit-based applications.
## Usage
Once enabled, the extension will expose a `/assets/validate` service endpoint, which can be explored from the list of available microservice endpoints exposed by the application:
* For *Kit*: http://localhost:8011/docs
* For *Create*: http://localhost:8111/docs
* For *Isaac Sim*: http://localhost:8211/docs
## Running the extension
To enable and execute the extension, from the root of the repository:
**On Windows:**
```batch
REM Link the extension against a Kit-based application from the Launcher:
link_app.bat C:/Users/<username>/AppData/Local/ov/pkg/create-2021.3.7
REM Launch Create, with the extension enabled:
app/omni.create.bat ^
--ext-folder ./exts ^
--enable omni.services.assets.validate
```
**On Linux:**
```shell
# Link the extension against a Kit-based application from the Launcher:
./link_app.sh ~/.local/share/ov/pkg/create-2021.3.7
# Launch Create, with the extension enabled:
./app/omni.create.sh \
--ext-folder ./exts \
--enable omni.services.assets.validate
```
| 1,208 | Markdown | 29.224999 | 178 | 0.741722 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/README.md | # CSV Reader Extension Sample
## [CSV Reader (omni.csv.reader)](exts/omni.csv.reader)
![CSV Reader UI and Result](exts/omni.csv.reader/data/OV_CSVReader_WhatToExpect.png)
### About
This sample extension presents, as a CSV reader extension, how to read a csv file, to populate a 3D scene with prims at X, Y, Z coordinates given by the CSV file data, as well as color(s). Generated prims rely on USD referencing.
### [README](exts/omni.csv.reader)
See the [README for this extension](exts/omni.csv.reader) to learn more about it including how to use it.
### [Tutorial](tutorial/tutorial.md)
Follow a [step-by-step tutorial](tutorial/tutorial.md) that walks you through the creation of `generate()` to open, read and populate the 3D scene, grouping objects by cluster or not, and with different colors.
## Adding This Extension
To add this extension to your Omniverse app:
1. Go into: Extension Manager -> Gear Icon -> Extension Search Path
2. Add this as a search path: `git://github.com/NVIDIA-Omniverse/kit-extension-sample-csv-reader?branch=main&dir=exts`
## Linking with an Omniverse app
For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
Run:
```bash
> link_app.bat
```
There is also an analogous `link_app.sh` for Linux. If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Alternatively, you can explicitly pass an app:
```bash
> link_app.bat --app code
```
You can also just pass a path to create the link to:
```bash
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2022.1.3"
```
## Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
| 1,863 | Markdown | 35.549019 | 229 | 0.749866 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/tutorial/tutorial.md | ![](https://github.com/NVIDIA-Omniverse/kit-extension-sample-csv-reader/raw/main/tutorial/images/logo.png)
# Create a CVS Reader Omniverse Kit Extension
**CSV** File, or **C**omma **S**eparated **V**alues, is the simplest form for storing data/information separated by commas. You can learn more about them in this [Wikipedia article](https://en.wikipedia.org/wiki/Comma-separated_values).
CSV files are commonly used to exchange data of various type and are broadly used. For example, you code CSV data for:
- the position of radio antennas and their types spread across one town/region
- the position of hotels in Paris and their grade
In this case the CSV file contains X, Y, Z information about the position of
some elements to be placed in a 3D environment, as well as a cluster column (representing some extra info), that will be used to color the elements by group.
## Learning Objectives
In this guide, you learn how to:
- Open a CSV file and read it
- Place a prim at an X, Y, Z position given by the CSV File
- Create USD references for the prims
- Color the prims based on data retrieved from the CSV file
<p align="center">
<img width=75% src="images/OV_CSVReader_WhatToExpect.png">
<p>
## Prerequisites
- Omniverse Code 2022.1 or above
- [Omniverse compatible GPU](https://docs.omniverse.nvidia.com/app_view/common/technical-requirements.html)
- Working knowledge of Python
- Working knowledge of USD in particular the notion of references
- [PIXAR USD Tutorial referencing](https://graphics.pixar.com/usd/release/tut_referencing_layers.html)
- [NVIDIA Developer page](https://developer.nvidia.com/usd/tutorials)
- [NVIDIA DLI Course](https://courses.nvidia.com/courses/course-v1:DLI+S-FX-02+V1/)
- [CSV](https://en.wikipedia.org/wiki/Comma-separated_values)
## Step 1: Download the Starter Project
In this section, you download and familiarize yourself with the starter project you use throughout this tutorial.
To get the starting code for this hands-on lab, please clone the `tutorial-start` branch of `kit-extension-sample-csv-reader` [github repository](https://github.com/NVIDIA-Omniverse/kit-extension-sample-csv-reader/tree/tutorial-start).
```shell
git clone -b tutorial-start https://github.com/NVIDIA-Omniverse/kit-extension-sample-csv-reader.git
```
This repository contains the assets you use in this tutorial.
### Step 1.1: Load the Extension
In the _Extensions_ tab, click on the **gear**. Next, in the **extension search path**, add the path to the `exts` sub-folder where you cloned the git repository. Then, search for **CSV** in the _Extensions_ tab, and enable the extension by clicking on its toggle button.
<p align="center">
<img width="75%" src="images/LoadExt.png">
<p>
To learn more about the other files in the repository, please check the [Build an Omniverse Extension in less than 10 Minutes](https://www.nvidia.com/en-us/on-demand/session/omniverse2020-om1483/), which explains how to create on extension.
### Step 1.2: Open `models.py`
This tutorial will focus on the `models.py` file found in the `exts/omni.csv.reader/omni/csv/reader/` directory, and in particular, on `generate()`. The starting point of `generate()` is included below for your reference:
```python
def generate(self):
# Clear the stage
# create a new stage with Y up and in meters
# set the up axis
# set the unit of the world
# add a light
# check that CSV exists
# Read CSV file
# Iterate over each row in the CSV file
# Skip the header row
# Don't read more than the max number of elements
# Create the shape with the appropriate color at each coordinate
# root prim
# add group to path if the user has selected that option
# create the prim if it does not exist
# Create prim to add the reference to.
# Add the reference
# Get mesh from shape instance
# Set location
# Set Color
pass
```
> 📝 **Note:** CSV Sample Files are provided within the _data_ folder of this extension
## Step 2: Prepare the Stage
This section demonstrates how to prepare a stage for shapes to be imported from a CSV file.
### Step 2.1: Clear the Stage
The first step is to clear the stage in order to remove any data from previous runs of this tool. This is done with the following code:
```python
def generate(self):
# Clear the stage
stage = omni.usd.get_context().get_stage()
root_prim = stage.GetPrimAtPath(self.root_path)
if (root_prim.IsValid()):
stage.RemovePrim(self.root_path)
```
The first statement gets the current stage. The second statement gets the prim path to the root prim, and if that prim is valid it is removed.
### Step 2.2: Create a New Stage
Next, a new stage is created with the following statements:
```python
# create a new stage with Y up and in meters
if omni.usd.get_context().new_stage() is False:
carb.log_warn(f"Failed creating a new stage.")
return
stage = omni.usd.get_context().get_stage()
```
Here, a new stage is created. If that fails a warning is printed to the console and `generate()` returns early. Otherwise, the new stage is used going forward.
### Step 2.3: Set Stage Parameters
Then, the parameters for the stage are set with the statements below:
```python
# set the up axis
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
# set the unit of the world
UsdGeom.SetStageMetersPerUnit(stage, self.stage_unit_per_meter)
stage.SetDefaultPrim(root_prim)
```
In these statements, the `y` axis is set to up, the stage units are set to meters, the root prim is set as the default prim. These steps are all necessary so that when you import shapes from a CSV file they have the up-direction you expect, are the correct size, and are added to the correct location within the stage tree.
### Step 2.4: Add a light
Finally, a light is added so that the shapes are visible once imported:
```python
# add a light
light_prim_path = self.root_path + '/DistantLight'
light_prim = UsdLux.DistantLight.Define(stage, light_prim_path)
light_prim.CreateAngleAttr(0.53)
light_prim.CreateColorAttr(Gf.Vec3f(1.0, 1.0, 0.745))
light_prim.CreateIntensityAttr(5000.0)
```
## Step 3: CSV file
This section demonstrates how to open and read from a CSV file.
### Step 3.1: CSV File Format
CSV Files are a common file format used by data scientists to store data. Two sample CSV files are shown below:
<p align="center">
<img width="75%" src="images/CSV_Sample_both.png">
<p>
The common format for CSV files contains a header in the first line with names for the different fields, and any number of following lines which contain values for each column. Each row represents one element in the list.
The rest of this section will outline how to open and read the data from a CSV file.
### Step 3.2: Check that the File Exists
It is good practice to check that a file exists before trying to open it as shown below:
```python
# check that CSV exists
if os.path.exists(self.csv_file_path):
```
If the file exists, then continue. If not, gracefully exit the routine and preferably notify the user that the file does not exist.
### Step 3.3: Read the CSV file
To open and read a CSV file, use Python’s built-in [**_csv_**](https://docs.python.org/3/library/csv.html) module as demonstrated in the following snippet:
```python
# Read CSV file
with open(self.csv_file_path, newline='') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
i = 1
```
Here the file is opened with the `open` statement and then `csv.reader` reads the file's contents. The iterator, `i`, will be used later to name each shape.
### Step 3.4: Process the CSV file
Each line of the CSV is processed using the following code block:
```python
# Iterate over each row in the CSV file
# Skip the header row
# Don't read more than the max number of elements
# Create the shape with the appropriate color at each coordinate
for row in itertools.islice(csv_reader, 1, self.max_elements):
name = row[0]
x = float(row[1])
y = float(row[2])
z = float(row[3])
cluster = row[4]
```
In the first statement, the `itertools` module is used to process only the correct rows. `islice()` will take rows from `csv_reader` starting at the index 1 (this skips the header) and until the end of the list or `self.max_elements`, whichever comes first.
The next few statements retrieve the name, coordinates, and cluster id from the given row.
If you would like to print out information as it runs in order to debug the code, you could add the following code:
```python
carb.log_info(f"X: {x} Y: {y} Z: {z}")
```
This would print the coordinates from each row to the console. Remove those lines after validating that reading was successful - no need to keep that kind of debugging in the final code.
## Step 4: Create each shape
This section will go through the creation of each shape at the correct location in the correct color.
### Step 4.1: Determine the Prim Path
The prim path is determined using the following code:
```python
# root prim
cluster_prim_path = self.root_path
# add group to path if the user has selected that option
if self.group_by_cluster:
cluster_prim_path += self.cluster_layer_root_path + cluster
cluster_prim = stage.GetPrimAtPath(cluster_prim_path)
# create the prim if it does not exist
if not cluster_prim.IsValid():
UsdGeom.Xform.Define(stage, cluster_prim_path)
shape_prim_path = cluster_prim_path + '/box_%d' % i
i += 1
```
First, all prims share the same root, so the path of each shape prim is created using the root prim's path. Second, if the user has selected to have the prims grouped, a group is appended to the path. Next, if that cluster does not exist yet it is created. Finally, the name of the individual prim is appended to the end of the path and the iterator is incremented.
In the code above, prims are grouped if the user has selected the grouping option. Imagine that the `cluster` refers to the type of object (ie. `cluster 6` refers to `street lights` and `cluster 29` to mail boxes). In that situation grouping can be very useful because instead of selecting each `street light` one by one in the stage scene, their group can be selected instead. This would let a user easily hide/show the entire group or edit the group in some other way.
<p align="center">
<img width="75%" src="images/TheMagicEye.png">
<p>
### Step 4.2: Create a Reference
When working with USD scene composition, using a _reference_ helps refer to the same "asset" multiple times. You can read more References in the [USD Glossary](https://graphics.pixar.com/usd/docs/USD-Glossary.html#USDGlossary-References).
Here, instead of creating one prim per line in the CSV, a single prim is created and then a reference to that shape is made for each line in the CSV. This has several benefits:
1. If the referred shape is changed, all elements would also change.
2. If saved, the output file will be smaller
This is done with the following code:
```python
# Create prim to add the reference to.
ref_shape = stage.OverridePrim(shape_prim_path)
# Add the reference
ref_shape.GetReferences().AddReference(str(self.shape_file_path), '/MyRef/RefMesh')
```
Here the reference is created and then used.
### Step 4.3: Set the Position of the Prim
Next, the position of the prim is set as follows:
```python
# Get mesh from shape instance
next_shape = UsdGeom.Mesh.Get(stage, shape_prim_path)
# Set location
next_shape.AddTranslateOp().Set(
Gf.Vec3f(
self.scale_factor*x,
self.scale_factor*y,
self.scale_factor*z))
```
In the first statement, you get a `UsdGeom.Mesh` representation of the prim and assign it to the `next_shape` variable. In the next statement, it is transformed according to the data read from the CSV file. Note that each is scaled by a constant value. This is simply because the shapes are large relative to the values of in the CSV file and so the translations are scaled up until the shapes are separated by a reasonable amount of space.
### Step 4.4: Color the Shapes
Finally, the shapes are colored with this code:
```python
# Set Color
next_shape.GetDisplayColorAttr().Set(
category_colors[int(cluster) % self.max_num_clusters])
```
Here, the color display attribute is set on each prim according to its cluster attribute read from the CSV file.
## Step 5: Conclusions
The final result should match the block below:
```python
def generate(self):
# Clear the stage
stage = omni.usd.get_context().get_stage()
root_prim = stage.GetPrimAtPath(self.root_path)
if (root_prim.IsValid()):
stage.RemovePrim(self.root_path)
# create a new stage with Y up and in meters
if omni.usd.get_context().new_stage() is False:
carb.log_warn(f"Failed creating a new stage.")
return
stage = omni.usd.get_context().get_stage()
# set the up axis
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
# set the unit of the world
UsdGeom.SetStageMetersPerUnit(stage, self.stage_unit_per_meter)
stage.SetDefaultPrim(root_prim)
# add a light
light_prim_path = self.root_path + '/DistantLight'
light_prim = UsdLux.DistantLight.Define(stage, light_prim_path)
light_prim.CreateAngleAttr(0.53)
light_prim.CreateColorAttr(Gf.Vec3f(1.0, 1.0, 0.745))
light_prim.CreateIntensityAttr(5000.0)
# check that CSV exists
if os.path.exists(self.csv_file_path):
# Read CSV file
with open(self.csv_file_path, newline='') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
i = 1
# Iterate over each row in the CSV file
# Skip the header row
# Don't read more than the max number of elements
# Create the shape with the appropriate color at each coordinate
for row in itertools.islice(csv_reader, 1, self.max_elements):
name = row[0]
x = float(row[1])
y = float(row[2])
z = float(row[3])
cluster = row[4]
# root prim
cluster_prim_path = self.root_path
# add group to path if the user has selected that option
if self.group_by_cluster:
cluster_prim_path += self.cluster_layer_root_path + cluster
cluster_prim = stage.GetPrimAtPath(cluster_prim_path)
# create the prim if it does not exist
if not cluster_prim.IsValid():
UsdGeom.Xform.Define(stage, cluster_prim_path)
shape_prim_path = cluster_prim_path + '/box_%d' % i
i += 1
# Create prim to add the reference to.
ref_shape = stage.OverridePrim(shape_prim_path)
# Add the reference
ref_shape.GetReferences().AddReference(str(self.shape_file_path), '/MyRef/RefMesh')
# Get mesh from shape instance
next_shape = UsdGeom.Mesh.Get(stage, shape_prim_path)
# Set location
next_shape.AddTranslateOp().Set(
Gf.Vec3f(
self.scale_factor*x,
self.scale_factor*y,
self.scale_factor*z))
# Set Color
next_shape.GetDisplayColorAttr().Set(
category_colors[int(cluster) % self.max_num_clusters])
```
This tutorial has demonstrated how to read a CSV file and use its data to place shape prims in a scene. Rather than place many unique shapes, the tutorial used references to place copies of the same shape. The shapes were located and colored based on data in the CSV file.
| 16,093 | Markdown | 38.738272 | 470 | 0.690424 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/exts/omni.csv.reader/config/extension.toml | [package]
# Semantic Versionning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Extension for reading CSV file"
description="This extension is to read a CSV file and then display elements (cubes/quad/...) at the XYZ given in the CSV file, + color if any. For now CSV files contain X,Y,Z and cluster keywords"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
changelog="docs/CHANGELOG.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Other"
# Keywords for the extension
keywords = ["kit", "example", "CSV"]
# Preview image. Folder named "data" automatically goes in git lfs (see .gitattributes file).
preview_image = "data/OV_CSVReader_WhatToExpect.png"
icon = "data/CSV_reader_ico.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import omni.hello.world".
[[python.module]]
name = "omni.csv.reader"
| 1,111 | TOML | 32.696969 | 196 | 0.738074 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/exts/omni.csv.reader/omni/csv/reader/extension.py | ###############################################################################
#
# Copyright 2020 NVIDIA Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
###############################################################################
###############################################################################
#
# Extension create by BB.CM.EB June 2022 for CSV Reader_extension
# Target : read a CSV file and populate the 3D environment
# with shapes at location X,Y,Z
# Known limitations - June/07
# - CSV files must contains X,Y,Z and[optional] cluster columns
# - nothing happens if no CSV file (not found/wrong type/etc...)
#
###############################################################################
import carb
import omni.ext
from .models import MainModel
from .views import MainView
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`)
# will be instantiated when extension gets enabled and `on_startup(ext_id)` will be called.
# Later when extension gets disabled on_shutdown() is called
class MyExtension(omni.ext.IExt):
    """Entry point for the CSV Reader extension: wires the model to the view.

    Any class derived from `omni.ext.IExt` in the top-level module (declared in
    `python.modules` of `extension.toml`) is instantiated when the extension is
    enabled; `on_startup(ext_id)` is called then, and `on_shutdown()` when the
    extension is disabled.
    """

    def on_startup(self, ext_id):
        # ext_id is the current extension id; it can be used with the extension
        # manager to query additional information (e.g. install location).
        carb.log_info("[CSV_Reader] MyExtension startup")
        model = MainModel()
        self._window = MainView(model)

    def on_shutdown(self):
        carb.log_info("[CSV_Reader] MyExtension shutdown")
        # Guard against a startup failure having left no window behind.
        window = getattr(self, "_window", None)
        if window is not None:
            window.destroy()
        self._window = None
| 2,630 | Python | 45.982142 | 119 | 0.647529 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/exts/omni.csv.reader/omni/csv/reader/models.py | # Model related
# Python built-in
import os.path
import carb
from pathlib import Path
# external python lib
import csv
import itertools
# USD imports
from pxr import Gf, UsdGeom, UsdLux
# omniverse
import omni.client
import omni.kit.app
from omni.kit.window.file_importer import get_file_importer
# Path to this module, and to the extension's bundled "data" folder
# (three directory levels up from omni/csv/reader).
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.parent.parent.parent.joinpath("data")
# Display color (RGB components in 0..1) assigned per cluster id; the cluster
# id from the CSV is taken modulo MainModel.max_num_clusters (10) before
# lookup.
# NOTE(review): with that modulo, key 10 can never be selected — confirm
# whether the extra entry is intentional.
category_colors = {
    0: [(1, 0, 0)],
    1: [(0, 1, 0)],
    2: [(0, 0.4, 0)],
    3: [(0, 0, 1)],
    4: [(0.3, 0.5, 1)],
    5: [(.5, .5, .5)],
    6: [(0, 0, 0)],
    7: [(0, 1, 1)],
    8: [(.8, .5, .25)],
    9: [(1, .5, 0)],
    10: [(1, 1, 1)],
}
# USD asset file referenced for each shape choice offered in the UI combo box.
shape_usda_name = {
    "cube": "BasicCubeAsRef.usda",
    "sphere": "BasicSphereAsRef.usda",
}
class MainModel():
    """Model for the CSV Reader extension.

    Reads a CSV file whose rows are (name, x, y, z, cluster) and populates a
    fresh USD stage with one referenced shape per row, optionally grouped
    under per-cluster xforms and colored by cluster id.
    """

    def __init__(self):
        # root prim paths
        self.root_path = '/World'
        self.cluster_layer_root_path = '/Class_'
        # stage_unit defines the number of units per meter
        self.stage_unit_per_meter = 1
        # Default CSV path (sample file deployed with the extension)
        self.csv_file_path = DATA_PATH.joinpath('CSVSample.csv')
        # path to the basic shape asset referenced for every row
        self.shape_file_name = "BasicCubeAsRef.usda"
        self.shape_file_path = DATA_PATH.joinpath(self.shape_file_name)
        # Scale factor so that the shapes are well spaced
        self.scale_factor = 100.0
        # limit the number of rows read
        self.max_elements = 5000
        # whether or not the shapes should be grouped by cluster
        self.group_by_cluster = False
        # max number of different color clusters
        self.max_num_clusters = 10

    def generate(self):
        """Reset the stage and populate it from the current CSV file."""
        stage = self._reset_stage()
        if stage is not None:
            self._populate_from_csv(stage)

    def _reset_stage(self):
        """Create a fresh Y-up stage with root xform and light; return it, or None on failure."""
        # Remove any previously generated content from the current stage
        stage = omni.usd.get_context().get_stage()
        old_root = stage.GetPrimAtPath(self.root_path)
        if old_root.IsValid():
            stage.RemovePrim(self.root_path)
        # create a new stage
        if omni.usd.get_context().new_stage() is False:
            carb.log_warn("Failed creating a new stage.")
            return None
        stage = omni.usd.get_context().get_stage()
        # set the up axis and the unit of the world
        UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
        UsdGeom.SetStageMetersPerUnit(stage, self.stage_unit_per_meter)
        # BUGFIX: the prim looked up before the reset belongs to the *old*
        # stage and is invalid here; define the root xform on the new stage so
        # the default prim is actually valid.
        root_prim = UsdGeom.Xform.Define(stage, self.root_path).GetPrim()
        stage.SetDefaultPrim(root_prim)
        # add a distant light so the generated shapes are lit
        light_prim = UsdLux.DistantLight.Define(stage, self.root_path + '/DistantLight')
        light_prim.CreateAngleAttr(0.53)
        light_prim.CreateColorAttr(Gf.Vec3f(1.0, 1.0, 0.745))
        light_prim.CreateIntensityAttr(5000.0)
        return stage

    def _populate_from_csv(self, stage):
        """Create one referenced, translated, colored shape per CSV row."""
        # check that the CSV exists; warn instead of failing silently
        if not os.path.exists(self.csv_file_path):
            carb.log_warn(f"CSV file not found: {self.csv_file_path}")
            return
        with open(self.csv_file_path, newline='') as csvfile:
            csv_reader = csv.reader(csvfile, delimiter=',')
            i = 1
            # Skip the header row and never read more than max_elements rows.
            for row in itertools.islice(csv_reader, 1, self.max_elements):
                # expected columns: name, x, y, z, cluster (name is unused)
                x = float(row[1])
                y = float(row[2])
                z = float(row[3])
                cluster = row[4]
                # parent path: the root, optionally extended by a cluster group
                cluster_prim_path = self.root_path
                if self.group_by_cluster:
                    cluster_prim_path += self.cluster_layer_root_path + cluster
                cluster_prim = stage.GetPrimAtPath(cluster_prim_path)
                # create the parent prim if it does not exist yet
                if not cluster_prim.IsValid():
                    UsdGeom.Xform.Define(stage, cluster_prim_path)
                shape_prim_path = cluster_prim_path + '/box_%d' % i
                i += 1
                # Create a prim and reference the basic shape asset into it
                ref_shape = stage.OverridePrim(shape_prim_path)
                ref_shape.GetReferences().AddReference(str(self.shape_file_path), '/MyRef/RefMesh')
                # Position the referenced mesh, scaled up for spacing
                next_shape = UsdGeom.Mesh.Get(stage, shape_prim_path)
                next_shape.AddTranslateOp().Set(
                    Gf.Vec3f(
                        self.scale_factor * x,
                        self.scale_factor * y,
                        self.scale_factor * z))
                # Color the shape according to its cluster id
                next_shape.GetDisplayColorAttr().Set(
                    category_colors[int(cluster) % self.max_num_clusters])

    def shape_changed(self, choice):
        """Handle the cube/sphere combo box change by swapping the referenced asset."""
        chosen_key = list(shape_usda_name.keys())[choice]
        self.shape_file_name = shape_usda_name[chosen_key]
        self.shape_file_path = DATA_PATH.joinpath(self.shape_file_name)

    def group_by_cluster_changed(self, do_clustering):
        """Handle the 'group by cluster' checkbox change."""
        self.group_by_cluster = do_clustering

    def select_file(self):
        """Handle the Load button: open the file importer dialog for CSV files."""
        self.file_importer = get_file_importer()
        self.file_importer.show_window(
            title="Select a CSV File",
            import_button_label="Select",
            import_handler=self._on_click_open,
            file_extension_types=[(".csv", "CSV Files (*.csv)")],
            file_filter_handler=self._on_filter_item
        )

    def _on_click_open(self, filename: str, dirname: str, selections):
        """Handle the Select button within the file importer dialog."""
        # File name should not be empty.
        filename = filename.strip()
        if not filename:
            carb.log_warn("Filename must be provided.")
            return
        # BUGFIX: build the full path from the directory AND the file name
        # (the file name component was previously dropped).
        if dirname:
            fullpath = f"{dirname}/{filename}"
        else:
            fullpath = filename
        self.csv_file_path = fullpath
        self.csv_field_model.set_value(str(fullpath))

    def _on_filter_item(self, filename: str, filter_postfix: str, filter_ext: str) -> bool:
        """Show only .csv files (and directories, passed as empty names) in the dialog."""
        if not filename:
            return True
        _, ext = os.path.splitext(filename)
        return ext == filter_ext
| 6,976 | Python | 34.596939 | 103 | 0.544151 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/exts/omni.csv.reader/omni/csv/reader/views.py | # import from omniverse
import omni.ui as ui
from omni.ui.workspace_utils import TOP
# import from other extension py
from .models import MainModel
class MainView():
    """View for the CSV Reader extension.

    Builds a dockable window exposing the CSV path, the shape choice, the
    cluster-grouping toggle and a Generate button, forwarding every UI event
    to the given MainModel.
    """

    def __init__(self, csvmodel: MainModel):
        self._window = ui.Window("CSV Reader", width=800, height=600, dockPreference=ui.DockPreference.RIGHT_TOP)
        self._window.visible = True
        csvmodel.csv_field_model = None

        # Named callbacks (closing over csvmodel) instead of inline lambdas.
        def _on_load_clicked():
            csvmodel.select_file()

        def _on_shape_changed(model, item):
            csvmodel.shape_changed(model.get_item_value_model().get_value_as_int())

        def _on_cluster_toggled(model):
            csvmodel.group_by_cluster_changed(model.get_value_as_bool())

        def _on_generate_clicked():
            csvmodel.generate()

        with self._window.frame:
            with ui.VStack(alignment=TOP, style={"margin": 5}):
                # Parameter section: CSV path, shape type, clustering option.
                with ui.VStack():
                    with ui.HStack(height=20):
                        ui.Label("CSV file path:", height=10, width=120)
                        self.csv_field = ui.StringField(height=10)
                        self.csv_field.enabled = False
                        self.csv_field.model.set_value(str(csvmodel.csv_file_path))
                        csvmodel.csv_field_model = self.csv_field.model
                        ui.Button("Load", width=40, clicked_fn=_on_load_clicked)
                    with ui.HStack(height=20):
                        ui.Label("Shape:", height=0)
                        combo = ui.ComboBox(0, "cube", "sphere")
                        combo.model.add_item_changed_fn(_on_shape_changed)
                    with ui.HStack(height=20):
                        ui.Label("Group By Cluster:", height=0)
                        checkbox = ui.CheckBox(width=20)
                        checkbox.model.add_value_changed_fn(_on_cluster_toggled)
                    ui.Line(style={"color": 0xff00b976}, height=20)
                # Action section: populate the 3D scene.
                ui.Button("Generate", height=50, clicked_fn=_on_generate_clicked)

    def destroy(self):
        """Tear down the window."""
        self._window.destroy()
        self._window = None
| 2,199 | Python | 46.826086 | 113 | 0.51387 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/exts/omni.csv.reader/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2022-07-01
### Known limitations
- CSV files must contains X,Y,Z and cluster columns
- nothing happens if no CSV file (not found/wrong type/etc...)
- for now, the scene is reset prior to generating the overlay; to be updated in a future version
| 348 | Markdown | 30.72727 | 90 | 0.718391 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/exts/omni.csv.reader/docs/README.md | # CSV Reader Extension (omni.csv.reader)
![CVS Reader UI and Result](../data/OV_CSVReader_WhatToExpect.png)
## Overview
The CSV Reader extension offers the possibility for users to populate the scene with some default shapes (Cubes or Spheres) at locations X, Y, Z found in the CSV file.
Colors are also added depending on the _cluster_ values given as well in the CSV file.
## [Tutorial](../../../tutorial/tutorial.md)
This extension sample also includes a step-by-step tutorial to accelerate your growth as you learn to build your own
Omniverse Kit extensions. [Get started with the tutorial.](../../../tutorial/tutorial.md)
## Usage
### Prerequisites :
1) CSV file location: either use the default files provided in the _data_ folder, or specify the path to your own CSV file in the UI field
2) Note that the CSV file should contain X, Y, Z and cluster columns. Please look at examples provided in the _data_ folder
### Workflow using the extension:
Click on 'Generate' -> that will create elements here and there based on the info from the CSV file.
1. [Optional] : In the parameters UI window, select the location of your CSV, the type of shape, and if you want to be grouped by class/_cluster_...
2. If required : you can export/save the USD stage with the 'File->Save As...' option.
3. The scene is reset every time you press on Generate.
| 1,352 | Markdown | 42.64516 | 167 | 0.741864 |
NVIDIA-Omniverse/kit-extension-sample-csv-reader/exts/omni.csv.reader/docs/index.rst | CVS_Reader_ext
########################
This extension reads a CSV file (the UI provides a field to enter its path)
and generates a 3D map of the spatial representation,
with X, Y, Z positions given by the CSV (plus colors per Class/Cluster).

This extension is a service of NVIDIA's Developer Programs organization and is in its first version.
Extensions Loaded
=================
.. toctree::
:maxdepth: 1
CHANGELOG
| 424 | reStructuredText | 21.36842 | 96 | 0.686321 |
NVIDIA-Omniverse/extension-contest/README.md | <!-- markdownlint-disable -->
<h1 align="center">
NVIDIA Omniverse Developer Contest
<br>
#ExtendOmniverse | Submissions Now Closed
</h1>
<h3 align="center">
Extend the Omniverse and Win
</h3>
<p align="center">Thank you to all of the incredible developers who used Omniverse Code to create an extension. Winners will be announced at the <a href="https://www.nvidia.com/gtc/session-catalog/?search=SE41388&search=SE41388%2C+SE41388&tab.catalogallsessionstab=16566177511100015Kus#/session/1658438748728001JAUi">Omniverse User Group</a> at GTC on September 20th at 3pm PDT, so be sure to register to find out if you won!
</p>
<p align="center">
<a href="https://www.nvidia.com/extend-omniverse-contest/"><img src="images/ov-dev-contest-1920x1080.jpg"></a>
</p>
<p align="center">
We are looking for extensions that fall into one of the three categories:
</p>
<p align="center">
<strong>
Layout or scene authoring tools
</strong>
</p>
<p align="center">
<strong>
Scene modifier or manipulator tools
</strong>
</p>
<p align="center">
<strong>
Use of Omni.ui
</strong>
</p>
## Getting Started
Ready to build your first extension? Check out the steps below to get up and running in no time. For additional details on prizes, eligibility, and requirements for the contest, visit the [official landing page](https://www.nvidia.com/en-us/omniverse/apps/code/developer-contest/) or the [announcement blog](https://developer.nvidia.com/blog/build-tools-for-the-3d-world-with-the-extend-the-omniverse-contest/).
### Installation Prerequisites
:heavy_check_mark: Install [NVIDIA Omniverse](https://www.nvidia.com/en-us/omniverse/download/)
:heavy_check_mark: Install [Omniverse Code](https://developer.nvidia.com/nvidia-omniverse-platform/code-app)
:heavy_check_mark: Install [Visual Studio Code](https://code.visualstudio.com/download)
### Building Your First Extension
Once the steps above are complete, getting started is easy. Simply launch Omniverse Code from the NVIDIA Omniverse Launcher, then navigate to the Extensions tab.
![Extensions Window](images/extensions-window.jpg)
Click the green + icon in the top left corner to create an extension from the template.
![New Extension](images/new-extension.jpg)
Choose the directory you'd like to create your extension, then provide a folder & project namespace to complete the project.
![Project Name](images/project-name.jpg)
![Extension Name](images/extension-name.jpg)
Visual Studio Code should automatically open with your newly created project, and you're ready to begin developing your first extension! Navigate to `exts\[your.project.namespace]\your\project\namespace\extension.py` to review the placeholder code and observe the extension window that is now open in Omniverse Code.
![VSCode Project](images/vscode-project.png)
You can also check out our [Spawn Primitives Extension Sample](https://github.com/NVIDIA-Omniverse/kit-extension-sample-spawn-prims) tutorial for getting up and running within 10 minutes.
## Samples & Resources
Below are a number of resources that will help accelerate your learning journey.
### Extension Samples
Kit comes bundled with a number of extensions, which can be found inside `app/kit/exts`, `app/kit/extscore`, and `app/exts`. Most of these are in Python, and the source is available for your continued learning
**Layout & Scene Authoring Samples**
* [Spawn Primitives Sample](https://github.com/NVIDIA-Omniverse/kit-extension-sample-spawn-prims) - Leverage the Command tab to spawn a set of primitives within your scene
* [Scatter Tool Sample](https://github.com/NVIDIA-Omniverse/kit-extension-sample-scatter) - Randomly distribute primitives within a given bounds
* [CSV Reader Sample](https://github.com/NVIDIA-Omniverse/kit-extension-sample-csv-reader) - Learn how to populate a scene using data from a CSV file
**Scene Modifier, Manipulator Tool Samples**
* [Viewport Info & Manipulator Samples](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-scene) - A collection of samples demonstrating how to render additional metadata and create custom manipluators within the Omniverse viewport
* [Viewport Reticle Sample](https://github.com/NVIDIA-Omniverse/kit-extension-sample-reticle) - Use `omni.scene.ui` to draw GUI reticles & compositions within the Omniverse viewport
**Styling Samples**
* [UI Window Samples](https://github.com/NVIDIA-Omniverse/kit-extension-sample-ui-window) - A collection of samples demonstrating how to layout and style custom dialog windows using Omniverse Kit
### Technical Documentation
* [Omniverse Code Overview](https://www.youtube.com/watch?v=j1Pwi1KRkhk) - The Omniverse Code app contains interactive documentation experimenting with key building blocks available in Kit
* [Python Kit API Reference & Technical Documentation](https://docs.omniverse.nvidia.com/py/kit/index.html)
* [NVIDIA Omniverse Resource Center - Extensions](https://developer.nvidia.com/nvidia-omniverse-developer-resource-center#extensions) - includes videos and additional resources for learning how to develop extensions
### Additional Resources
We have a fantastic community of active developers in our forums and the official Omniverse Discord channel. See the links below for support and connecting with the broader Omniverse developer community:
* [Omniverse Extension Forums](https://forums.developer.nvidia.com/c/omniverse/extension/399)
* [NVIDIA Omniverse Discord](https://forums.developer.nvidia.com/t/omniverse-discord-server-is-live/178422)
## Submitting Your Extension
Below is a high level checklist of what you'll need to do in order to submit your entry. You can also check out the [How to Submit](https://www.youtube.com/watch?v=z8khQyHT_44) video for detailed instructions on how to correctly publish your extension.
### Prepare
:heavy_check_mark: Develop & test your extension
:heavy_check_mark: Update your `extension.toml` config file found in `exts\[project]\config`
:heavy_check_mark: Update your extension's `README.md` & `CHANGELOG.md` found in `exts\[project]\docs`
:heavy_check_mark: Update your extension's icon.png & preview.png images found in `exts\[project]\data`
### Publish
:heavy_check_mark: Publish your project to a public repo on GitHub. Ensure your repo's `root` directory contains the `exts\` folder (see our [template](https://github.com/NVIDIA-Omniverse/kit-extension-template) as an example)
:heavy_check_mark: Add the `omniverse-kit-extension` [Topic](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/classifying-your-repository-with-topics) to your repo so that it shows up [here](https://github.com/topics/omniverse-kit-extension)
:heavy_check_mark: Publish a [Release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository) for your project
:heavy_check_mark: Download and unzip your release's `source.zip` file to ensure the the root directory contains the `exts\` folder
### Submit
:heavy_check_mark: Create a short video demonstrating what your extension does
:heavy_check_mark: Complete the submission form from the contest landing page [here](https://www.nvidia.com/en-us/omniverse/apps/code/developer-contest/)
| 7,316 | Markdown | 67.383177 | 425 | 0.773783 |
NVIDIA-Omniverse/usd-plugin-samples/CHANGES.md | # Changelog
## 3.0.0
- Added several examples for Hydra 2 scene index plugins
- Fixed issue in build plugInfo.json file configuration for debug builds
- Updated dependencies to stock USD 23.05
- Updated openssl and libcurl dependencies
## 2.0.0
- Added support for general USD plugins beyond schemas
- Updated repo_usd to support flexible build files
- Updated dependencies to USD 22.11 and Python 3.10
- Added sample for dynamic payloads and file format plugins
## 1.0.0
- Initial open source release | 507 | Markdown | 25.736841 | 72 | 0.773176 |
NVIDIA-Omniverse/usd-plugin-samples/repo.toml | # common settings for repo_usd for all USD plug-ins
[repo_usd]
usd_root = "${root}/_build/usd-deps/nv-usd/%{config}"
usd_python_root = "${root}/_build/usd-deps/python"
generate_plugin_buildfiles = true
plugin_buildfile_format = "cmake"
generate_root_buildfile = true
# this tells repo_usd about our codeful schema extension
[repo_usd.plugin.omniExampleSchema]
schema_file = "${root}/src/usd-plugins/schema/omniExampleSchema/schema.usda"
plugin_dir = "${root}/src/usd-plugins/schema/omniExampleSchema"
generate_dir = "${root}/src/usd-plugins/schema/omniExampleSchema/generated"
install_root = "${root}/_install/%{platform}/%{config}/omniExampleSchema"
library_prefix = "OmniExample"
usd_lib_dependencies = [
"arch",
"tf",
"vt",
"sdf",
"usd"
]
# this tells repo_usd about our codeless schema extension
[repo_usd.plugin.omniExampleCodelessSchema]
schema_file = "${root}/src/usd-plugins/schema/omniExampleCodelessSchema/schema.usda"
plugin_dir = "${root}/src/usd-plugins/schema/omniExampleCodelessSchema"
generate_dir = "${root}/src/usd-plugins/schema/omniExampleCodelessSchema/generated"
install_root = "${root}/_install/%{platform}/%{config}/omniExampleCodelessSchema"
is_codeless = true
# this tells repo_usd about the codeless schema for use by
# our file format / dynamic payload infrastructure
[repo_usd.plugin.omniMetSchema]
schema_file = "${root}/src/usd-plugins/schema/omniMetSchema/schema.usda"
plugin_dir = "${root}/src/usd-plugins/schema/omniMetSchema"
generate_dir = "${root}/src/usd-plugins/schema/omniMetSchema/generated"
install_root = "${root}/_install/%{platform}/%{config}/omniMetSchema"
is_codeless = true
# this tells repo_usd about our file format plugin
[repo_usd.plugin.edfFileFormat]
plugin_dir = "${root}/src/usd-plugins/fileFormat/edfFileFormat"
install_root = "${root}/_install/%{platform}/%{config}/edfFileFormat"
include_dir = "include/edfFileFormat"
additional_include_dirs = [
"../../../../_build/usd-deps/nv_usd/%{config}/include/tbb"
]
public_headers = [
"api.h",
"iEdfDataProvider.h",
"edfDataProviderFactory.h"
]
private_headers = [
"edfData.h",
"edfPluginManager.h",
"edfFileFormat.h"
]
cpp_files = [
"edfData.cpp",
"edfDataProviderFactory.cpp",
"edfPluginManager.cpp",
"edfFileFormat.cpp",
"iEdfDataProvider.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"plug",
"vt",
"gf",
"sdf",
"js",
"pcp"
]
# this tells repo_usd about our EDF provider implementing the back-end
# functionality to fulfill the dynamic payload
[repo_usd.plugin.omniMetProvider]
plugin_dir = "${root}/src/usd-plugins/dynamicPayload/omniMetProvider"
install_root = "${root}/_install/%{platform}/%{config}/omniMetProvider"
include_dir = "include/omniMetProvider"
additional_include_dirs = [
"../../../../src/usd-plugins/fileFormat/edfFileFormat",
"../../../../_build/target-deps/libcurl/include"
]
preprocessor_defines = [
"CURL_STATICLIB"
]
depends_on = [
"edfFileFormat"
]
private_headers = [
"api.h",
"omniMetProvider.h"
]
cpp_files = [
"omniMetProvider.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"plug",
"vt",
"gf",
"sdf",
"js",
"pcp",
"usd"
]
[repo_usd.plugin.omniMetProvider."platform:windows-x86_64"]
additional_libs = [
"edfFileFormat",
"zlib",
"ws2_32",
"crypt32"
]
additional_static_libs = [
"libcurl"
]
additional_library_dirs = [
"../../../../_install/%{platform}/%{config}/edfFileFormat/lib",
"../../../../_build/target-deps/libcurl/lib",
"../../../../_build/target-deps/zlib/lib/rt_dynamic/release"
]
[repo_usd.plugin.omniMetProvider."platform:linux-x86_64"]
additional_libs = [
"edfFileFormat"
]
additional_static_libs = [
"curl",
"ssl",
"crypto",
"z"
]
additional_library_dirs = [
"../../../../_install/%{platform}/%{config}/edfFileFormat/lib",
"../../../../_build/target-deps/libcurl/lib",
"../../../../_build/target-deps/zlib/lib",
"../../../../_build/target-deps/openssl/lib"
]
[repo_usd.plugin.omniMetProvider."platform:linux-aarch64"]
additional_libs = [
"edfFileFormat"
]
additional_static_libs = [
"curl",
"ssl",
"crypto",
"z"
]
additional_library_dirs = [
"../../../../_install/%{platform}/%{config}/edfFileFormat/lib",
"../../../../_build/target-deps/libcurl/lib",
"../../../../_build/target-deps/zlib/lib",
"../../../../_build/target-deps/openssl/lib"
]
[repo_usd.plugin.omniGeoSceneIndex]
plugin_dir = "${root}/src/hydra-plugins/omniGeoSceneIndex"
install_root = "${root}/_install/%{platform}/%{config}/omniGeoSceneIndex"
include_dir = "include/omniGeoSceneIndex"
private_headers = [
"api.h",
"computedDependentDataSource.h",
"computedPrimDataSource.h",
"geospatialDataSource.h",
"geospatialSceneIndex.h",
"geospatialSceneIndexPlugin.h",
"localPositionAPIAdapter.h",
"localPositionDataSource.h",
"localPositionSchema.h",
"referencePositionAPIAdapter.h",
"referencePositionDataSource.h",
"referencePositionSchema.h"
]
cpp_files = [
"computedDependentDataSource.cpp",
"computedPrimDataSource.cpp",
"geospatialDataSource.cpp",
"geospatialSceneIndex.cpp",
"geospatialSceneIndexPlugin.cpp",
"localPositionAPIAdapter.cpp",
"localPositionDataSource.cpp",
"localPositionSchema.cpp",
"referencePositionAPIAdapter.cpp",
"referencePositionDataSource.cpp",
"referencePositionSchema.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"work",
"plug",
"vt",
"gf",
"hd",
"hf",
"sdf",
"usd",
"usdGeom",
"usdImaging"
]
additional_include_dirs = [
"${root}/_build/target-deps/omni-geospatial/include"
]
additional_library_dirs = [
"${root}/_build/target-deps/omni-geospatial/bin"
]
additional_libs = [
"omniGeospatial"
]
[repo_usd.plugin.omniMetricsAssembler]
plugin_dir = "${root}/src/hydra-plugins/omniMetricsAssembler"
install_root = "${root}/_install/%{platform}/%{config}/omniMetricsAssembler"
include_dir = "include/omniMetricsAssembler"
private_headers = [
"api.h",
"metricsAdapter.h",
"metricsDataSource.h",
"metricsDoubleDataSource.h",
"metricsSceneIndex.h",
"metricsSceneIndexPlugin.h",
"metricsSchema.h"
]
cpp_files = [
"metricsAdapter.cpp",
"metricsDataSource.cpp",
"metricsDoubleDataSource.cpp",
"metricsSceneIndex.cpp",
"metricsSceneIndexPlugin.cpp",
"metricsSchema.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"work",
"plug",
"vt",
"gf",
"hd",
"hf",
"sdf",
"usd",
"usdGeom",
"usdImaging"
]
[repo_usd.plugin.omniWarpSceneIndex]
plugin_dir = "${root}/src/hydra-plugins/omniWarpSceneIndex"
schema_file = "${root}/src/hydra-plugins/omniWarpSceneIndex/schema.usda"
library_prefix = "OmniWarpSceneIndex"
install_root = "${root}/_install/%{platform}/%{config}/omniWarpSceneIndex"
include_dir = "include/omniWarpSceneIndex"
private_headers = [
"api.h",
"tokens.h",
"warpComputationAPI.h",
"warpComputationAPIAdapter.h",
"warpComputationSchema.h",
"warpPythonModule.h",
"warpSceneIndex.h",
"warpSceneIndexPlugin.h"
]
cpp_files = [
"tokens.cpp",
"warpComputationAPI.cpp",
"warpComputationAPIAdapter.cpp",
"warpComputationSchema.cpp",
"warpPythonModule.cpp",
"warpSceneIndex.cpp",
"warpSceneIndexPlugin.cpp",
"moduleDeps.cpp"
]
pymodule_cpp_files = [
"module.cpp",
"wrapTokens.cpp",
"wrapWarpComputationAPI.cpp"
]
pymodule_files = [
"__init__.py",
"oceanSim/__init__.py",
"oceanSim/preferences.py",
"oceanSim/preferencesUI.py",
"warpModules/__init__.py",
"warpModules/cloth.py",
"warpModules/deform01.py",
"warpModules/deform02.py",
"warpModules/ocean.py",
"warpModules/particles.py"
]
resource_files = [
"plugInfo.json",
"schema.usda"
]
usd_lib_dependencies = [
"arch",
"tf",
"gf",
"plug",
"trace",
"vt",
"work",
"hio",
"garch",
"glf",
"hd",
"hdsi",
"hdx",
"hf",
"pxOsd",
"sdf",
"sdr",
"usd",
"usdGeom",
"usdShade",
"usdImaging",
] | 8,346 | TOML | 23.991018 | 84 | 0.654325 |
NVIDIA-Omniverse/usd-plugin-samples/deps/usd-deps.packman.xml | <project toolsVersion="5.6">
<dependency name="nv-usd" linkPath="../_build/usd-deps/nv-usd/${config}">
<package name="usd.py310.${platform}.usdview.${config}" version="0.23.05-tc.47+v23.05.b53573ea" />
</dependency>
<dependency name="python" linkPath="../_build/usd-deps/python">
<package name="python" version="3.10.13+nv1-${platform}" />
</dependency>
</project> | 380 | XML | 46.624994 | 102 | 0.665789 |
NVIDIA-Omniverse/usd-plugin-samples/src/kit-extension/exts/omni.example.schema/config/extension.toml | [core]
# tells kit that we shouldn't hot reload this extension
reloadable = false
# Load at the start, load all schemas with order -100 (with order -1000 the USD libs are loaded)
# this is necessary (as it to set the extension to auto load)
# so that the schemas get loaded into the UsdSchemaRegistry early enough
order = -100
[package]
# all packages should have this information so it is displayed in the UI properly
author = "NVIDIA USD Core Team"
repository = "https://github.com/NVIDIA-Omniverse/kit-sample-usd-schema"
category = "USD"
title = "USD Example Schema"
version = "1.0.0"
description="Kit extension illustrating how to package a schema extension for use in kit."
keywords = ["schema", "usd"]
readme = "docs/README.md"
changelog = "docs/CHANGES.md"
icon = "data/icon.png"
[dependencies]
# depends on core USD libraries being loaded
"omni.usd.libs" = {}
# when an extension is requested to be enabled, kit will load the python modules
# that are specified here in the order they are specified
# we specify two different python modules, the first is the module we create
# this one will register the plugin with USD so it knows in what module to
# find our schema types
[[python.module]]
name = "omni.example.schema"
# the second extension is the schema python module itself
# this is the module that developers will import to use the schema in Python
[[python.module]]
name = "OmniExampleSchema"
# this tells kit to load these C++ libraries when the extension loads
# (kit will also try to unload them when the extension is unloaded)
# note that this is required to also make loading the schema python module work
# (importing the schema python module will look for the C++ library as a dependency)
# if you don't load the C++ lib here, your PATH / LD_LIBRARY_PATH variables
# should contain the path to your C++ dll otherwise the python module will
# not load properly!
[[native.library]]
path = "OmniExampleSchema/lib/${lib_prefix}omniExampleSchema${lib_ext}" | 1,982 | TOML | 40.312499 | 96 | 0.757316 |
NVIDIA-Omniverse/usd-plugin-samples/src/kit-extension/exts/omni.example.schema/omni/example/schema/__init__.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pxr import Plug
# this extension is responsible for loading both plug-ins that represent the
# example codeful and codeless schema extensions
plugin_root = os.path.join(os.path.dirname(__file__), "..", "..",
"..", "OmniExampleSchema", "resources")
Plug.Registry().RegisterPlugins(plugin_root)
plugin_root = os.path.join(os.path.dirname(__file__), "..", "..",
"..", "OmniExampleCodelessSchema", "resources") | 1,012 | Python | 41.208332 | 76 | 0.737154 |
NVIDIA-Omniverse/usd-plugin-samples/src/kit-extension/exts/omni.example.schema/docs/CHANGES.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2023-01-19
- Initial version of an illustrative example of a kit extension loading a set of USD schema extensions | 222 | Markdown | 30.857138 | 102 | 0.734234 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/api.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_ASSEMBLER_API_H
#define OMNI_METRICS_ASSEMBLER_API_H
#include "pxr/base/arch/export.h"
// Standard USD-style symbol visibility macros for this library.
// PXR_STATIC builds need no decoration; shared builds export when compiling
// the library itself (OMNIMETRICSASSEMBLER_EXPORTS defined) and import otherwise.
#if defined(PXR_STATIC)
#   define OMNIMETRICSASSEMBLER_API
#   define OMNIMETRICSASSEMBLER_API_TEMPLATE_CLASS(...)
#   define OMNIMETRICSASSEMBLER_API_TEMPLATE_STRUCT(...)
#   define OMNIMETRICSASSEMBLER_LOCAL
#else
#   if defined(OMNIMETRICSASSEMBLER_EXPORTS)
#       define OMNIMETRICSASSEMBLER_API ARCH_EXPORT
#       define OMNIMETRICSASSEMBLER_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIMETRICSASSEMBLER_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
#   else
#       define OMNIMETRICSASSEMBLER_API ARCH_IMPORT
#       define OMNIMETRICSASSEMBLER_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIMETRICSASSEMBLER_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
#   endif
#   define OMNIMETRICSASSEMBLER_LOCAL ARCH_HIDDEN
#endif
#endif // OMNI_METRICS_ASSEMBLER_API_H
| 1,592 | C | 40.921052 | 102 | 0.743719 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_METRICS_DATA_SOURCE_H_
#define HD_OMNI_METRICS_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
#include <pxr/imaging/hd/sceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNIMETRICSDATASOURCE_TOKENS \
(metricsPreservedXform)
TF_DECLARE_PUBLIC_TOKENS(HdOmniMetricsDataSourceTokens, OMNIMETRICSASSEMBLER_API,
HDOMNIMETRICSDATASOURCE_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniMetricsDataSource
///
/// A datasource representing a wrapped view of an existing flattened
/// datasource where the xform token is intercepted and a new metric-corrected
/// transform matrix is dynamically computed.
///
class HdOmniMetricsDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniMetricsDataSource);
    // Wraps `wrappedDataSource` for the prim at `primPath`; the scene index
    // reference is used to walk parent prims when computing corrected xforms.
    HdOmniMetricsDataSource(const HdSceneIndexBase& sceneIndex, const SdfPath& primPath,
        HdContainerDataSourceHandle wrappedDataSource);
    // Replaces the underlying wrapped data source (e.g. when the input prim
    // is re-added); cached computations are NOT implicitly invalidated here.
    void UpdateWrappedDataSource(HdContainerDataSourceHandle wrappedDataSource);
    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
    // determines if the data source would be dirtied based on the locators given
    bool IsPrimDirtied(const HdDataSourceLocatorSet& locators);
private:
    // true if the given container carries the metrics schema data
    bool _HasMetricsInformation(HdContainerDataSourceHandle dataSource);
    // passthrough access to the wrapped xform's resetXformStack flag
    HdBoolDataSourceHandle _GetInputResetXformStackSource();
    // lazily builds (and caches) the metrics-corrected flattened xform
    HdDataSourceBaseHandle _ComputeCorrectedXform();
private:
    const HdSceneIndexBase& _sceneIndex;
    SdfPath _primPath;
    HdContainerDataSourceHandle _wrappedDataSource;
    // cached computed datasources
    HdContainerDataSourceAtomicHandle _computedCorrectedXformDataSource;
    // Matrix data source that applies the MPU corrective to the wrapped
    // flattened matrix (or re-flattens a child against a corrected parent).
    class _MetricsCorrectedMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_MetricsCorrectedMatrixDataSource);
        _MetricsCorrectedMatrixDataSource(HdContainerDataSourceHandle inputDataSource,
            HdContainerDataSourceHandle parentDataSource,
            bool isMetricsCorrectiveSource);
        // typed sampled data source overrides
        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;
    private:
        HdMatrixDataSourceHandle _GetInputMatrixDataSource() const;
        HdMatrixDataSourceHandle _GetParentMatrixDataSource() const;
        HdMatrixDataSourceHandle _GetMetricsPreservedMatrixDataSource() const;
        HdMatrixDataSourceHandle _GetParentMetricsPreservedMatrixDataSource() const;
        GfMatrix4d _ComputeCorrectedMatrix(Time shutterOffset);
        GfMatrix4d _GetMpuCorrective();
        HdContainerDataSourceHandle _inputDataSource;
        HdContainerDataSourceHandle _parentDataSource;
        bool _isMetricsCorrectiveSource;
    };
    HD_DECLARE_DATASOURCE_HANDLES(_MetricsCorrectedMatrixDataSource);
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniMetricsDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_METRICS_DATA_SOURCE_H_ | 3,949 | C | 34.267857 | 88 | 0.736389 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndexPlugin.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/sceneIndexPluginRegistry.h>
#include "metricsSceneIndexPlugin.h"
#include "metricsSceneIndex.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PRIVATE_TOKENS(
_tokens,
((sceneIndexPluginName, "OmniMetricsSceneIndexPlugin"))
);
// registers the plugin type with the Tf type system so Hydra can discover it
TF_REGISTRY_FUNCTION(TfType)
{
    HdSceneIndexPluginRegistry::Define<OmniMetricsSceneIndexPlugin>();
}
// registers this scene index plugin for all render delegates (empty renderer
// display name) so the metrics scene index is inserted regardless of renderer
TF_REGISTRY_FUNCTION(HdSceneIndexPlugin)
{
    const HdSceneIndexPluginRegistry::InsertionPhase insertionPhase = 1;
    // register this scene index plugin with all renderers
    // and try to insert ourselves early in the phases at the start
    HdSceneIndexPluginRegistry::GetInstance().RegisterSceneIndexForRenderer(
        "",
        _tokens->sceneIndexPluginName,
        nullptr,
        insertionPhase,
        HdSceneIndexPluginRegistry::InsertionOrderAtStart);
}
OmniMetricsSceneIndexPlugin::OmniMetricsSceneIndexPlugin() = default;
// Called by Hydra to chain our filtering scene index onto the input scene.
HdSceneIndexBaseRefPtr OmniMetricsSceneIndexPlugin::_AppendSceneIndex(
    const HdSceneIndexBaseRefPtr& inputScene,
    const HdContainerDataSourceHandle& inputArgs)
{
    return OmniMetricsSceneIndex::New(inputScene, inputArgs);
}
PXR_NAMESPACE_CLOSE_SCOPE | 1,755 | C++ | 31.518518 | 76 | 0.768091 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include "metricsSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniMetricsSchemaTokens, HDOMNI_METRICS_SCHEMA_TOKENS);
// Wraps an existing container data source with typed metrics accessors.
HdOmniMetricsSchema::HdOmniMetricsSchema(HdContainerDataSourceHandle container) :
    HdSchema(container)
{
}
// Returns the layer's meters-per-unit value, or null if not present.
HdDoubleDataSourceHandle HdOmniMetricsSchema::GetLayerMpu()
{
    return _GetTypedDataSource<HdDoubleDataSource>(HdOmniMetricsSchemaTokens->layerMpu);
}
// Returns the stage's meters-per-unit value, or null if not present.
HdDoubleDataSourceHandle HdOmniMetricsSchema::GetStageMpu()
{
    return _GetTypedDataSource<HdDoubleDataSource>(HdOmniMetricsSchemaTokens->stageMpu);
}
// Builds a retained container holding only the metrics entries that were
// actually supplied; null handles are omitted entirely from the container.
HdContainerDataSourceHandle HdOmniMetricsSchema::BuildRetained(
    const HdDoubleDataSourceHandle& layerMpu,
    const HdDoubleDataSourceHandle& stageMpu)
{
    TfToken entryNames[2];
    HdDataSourceBaseHandle entryValues[2];
    size_t entryCount = 0;
    // collects a (name, value) pair into the fixed-size arrays above
    auto addEntry = [&](const TfToken& entryName, const HdDataSourceBaseHandle& entryValue)
    {
        entryNames[entryCount] = entryName;
        entryValues[entryCount] = entryValue;
        ++entryCount;
    };
    if (layerMpu)
    {
        addEntry(HdOmniMetricsSchemaTokens->layerMpu, layerMpu);
    }
    if (stageMpu)
    {
        addEntry(HdOmniMetricsSchemaTokens->stageMpu, stageMpu);
    }
    return HdRetainedContainerDataSource::New(entryCount, entryNames, entryValues);
}
// Extracts the "metrics" sub-container from a parent container (if any) and
// wraps it in the schema; a null parent yields a schema over a null container.
HdOmniMetricsSchema HdOmniMetricsSchema::GetFromParent(const HdContainerDataSourceHandle& fromParentContainer)
{
    HdContainerDataSourceHandle metricsContainer;
    if (fromParentContainer)
    {
        metricsContainer = HdContainerDataSource::Cast(
            fromParentContainer->Get(HdOmniMetricsSchemaTokens->metrics));
    }
    return HdOmniMetricsSchema(metricsContainer);
}
// Locator identifying where metrics data lives on a prim's container.
const HdDataSourceLocator& HdOmniMetricsSchema::GetDefaultLocator()
{
    static const HdDataSourceLocator locator(HdOmniMetricsSchemaTokens->metrics);
    return locator;
}
// Fluent setter for the layer MPU entry; returns *this for chaining.
HdOmniMetricsSchema::Builder& HdOmniMetricsSchema::Builder::SetLayerMpu(const HdDoubleDataSourceHandle& layerMpu)
{
    _layerMpu = layerMpu;
    return *this;
}
// Fluent setter for the stage MPU entry; returns *this for chaining.
HdOmniMetricsSchema::Builder& HdOmniMetricsSchema::Builder::SetStageMpu(const HdDoubleDataSourceHandle& stageMpu)
{
    _stageMpu = stageMpu;
    return *this;
}
// Materializes the builder state into a retained container data source.
HdContainerDataSourceHandle HdOmniMetricsSchema::Builder::Build()
{
    return HdOmniMetricsSchema::BuildRetained(
        _layerMpu,
        _stageMpu
    );
}
PXR_NAMESPACE_CLOSE_SCOPE | 2,762 | C++ | 28.08421 | 113 | 0.761405 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndexPlugin.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_SCENE_INDEX_PLUGIN_H_
#define OMNI_METRICS_SCENE_INDEX_PLUGIN_H_
#include <pxr/pxr.h>
#include <pxr/imaging/hd/sceneIndexPlugin.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
///
/// \class OmniMetricsSceneIndexPlugin
///
/// Defines the Hydra 2.0 scene index plugin that creates
/// the OmniMetricsSceneIndex.
///
class OmniMetricsSceneIndexPlugin : public HdSceneIndexPlugin
{
public:
    OmniMetricsSceneIndexPlugin();
protected:
    // inserts an OmniMetricsSceneIndex filtering the given input scene
    HdSceneIndexBaseRefPtr _AppendSceneIndex(const HdSceneIndexBaseRefPtr& inputScene,
        const HdContainerDataSourceHandle& inputArgs) override;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_METRICS_SCENE_INDEX_PLUGIN_H_ | 1,275 | C | 29.380952 | 86 | 0.763137 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDoubleDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_METRICS_DOUBLE_DATA_SOURCE_H_
#define HD_OMNI_METRICS_DOUBLE_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
PXR_NAMESPACE_OPEN_SCOPE
///
/// \class HdOmniMetricsDoubleDataSource
///
/// Concrete implementation for a simple data source that
/// holds a uniform double value.
///
class HdOmniMetricsDoubleDataSource : public HdDoubleDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniMetricsDoubleDataSource);
    // returns the held value boxed in a VtValue (shutterOffset is ignored)
    VtValue GetValue(Time shutterOffset) override;
    // returns the held value directly (shutterOffset is ignored)
    double GetTypedValue(Time shutterOffset) override;
    // always reports no time samples: the value is uniform over time
    bool GetContributingSampleTimesForInterval(
        Time startTime,
        Time endTime,
        std::vector<Time>* outSampleTimes) override;
private:
    HdOmniMetricsDoubleDataSource(double value);
    double _value;
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniMetricsDoubleDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_METRICS_DOUBLE_DATA_SOURCE_H_ | 1,550 | C | 29.411764 | 75 | 0.759355 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDoubleDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "metricsDoubleDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Stores the uniform double value this data source will report.
HdOmniMetricsDoubleDataSource::HdOmniMetricsDoubleDataSource(double value) : _value(value)
{
}
// Boxes the typed value in a VtValue; the shutter offset is irrelevant
// because the value is uniform over time.
VtValue HdOmniMetricsDoubleDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}
// Returns the stored value; shutterOffset is ignored (time-uniform value).
double HdOmniMetricsDoubleDataSource::GetTypedValue(Time shutterOffset)
{
    return _value;
}
// Uniform values contribute no time samples; returning false tells Hydra
// the value is constant across the requested interval.
bool HdOmniMetricsDoubleDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    return false;
}
PXR_NAMESPACE_CLOSE_SCOPE | 1,178 | C++ | 27.756097 | 90 | 0.773345 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_SCHEMA_H_
#define OMNI_METRICS_SCHEMA_H_
#include <pxr/imaging/hd/schema.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNI_METRICS_SCHEMA_TOKENS \
(metrics) \
(layerMpu) \
(stageMpu)
TF_DECLARE_PUBLIC_TOKENS(HdOmniMetricsSchemaTokens, OMNIMETRICSASSEMBLER_API,
HDOMNI_METRICS_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
// Typed schema view over the "metrics" container data source, exposing the
// layer and stage meters-per-unit values recorded by the metrics adapter.
class HdOmniMetricsSchema : public HdSchema
{
public:
    HdOmniMetricsSchema(HdContainerDataSourceHandle container);
    // MPU authored on the layer holding the strongest relevant opinion
    OMNIMETRICSASSEMBLER_API
    HdDoubleDataSourceHandle GetLayerMpu();
    // MPU of the composed stage's root layer
    OMNIMETRICSASSEMBLER_API
    HdDoubleDataSourceHandle GetStageMpu();
    // builds a retained container from the given entries (nulls are skipped)
    OMNIMETRICSASSEMBLER_API
    static HdContainerDataSourceHandle
    BuildRetained(
        const HdDoubleDataSourceHandle& layerMpu,
        const HdDoubleDataSourceHandle& stageMpu);
    // fluent builder producing the same container as BuildRetained
    class Builder
    {
    public:
        OMNIMETRICSASSEMBLER_API
        Builder& SetLayerMpu(const HdDoubleDataSourceHandle& layerMpu);
        OMNIMETRICSASSEMBLER_API
        Builder& SetStageMpu(const HdDoubleDataSourceHandle& stageMpu);
        OMNIMETRICSASSEMBLER_API
        HdContainerDataSourceHandle Build();
    private:
        HdDoubleDataSourceHandle _layerMpu;
        HdDoubleDataSourceHandle _stageMpu;
    };
    // extracts the "metrics" entry from a parent prim container
    OMNIMETRICSASSEMBLER_API
    static HdOmniMetricsSchema GetFromParent(
        const HdContainerDataSourceHandle& fromParentContainer);
    // locator for the "metrics" entry on a prim's container
    OMNIMETRICSASSEMBLER_API
    static const HdDataSourceLocator& GetDefaultLocator();
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // end OMNI_METRICS_SCHEMA_H_ | 2,292 | C | 28.025316 | 79 | 0.692845 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdGeom/xformable.h>
#include <pxr/usd/usdGeom/sphere.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/imaging/hd/overlayContainerDataSource.h>
#include <pxr/imaging/hd/xformSchema.h>
#include "metricsAdapter.h"
#include "metricsDoubleDataSource.h"
#include "metricsSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// registers the adapter with the Tf type system and installs its factory so
// UsdImaging can instantiate it as a keyless (all-prims) API schema adapter
TF_REGISTRY_FUNCTION(TfType)
{
    typedef OmniMetricsAssemblerAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter> >();
}
// No resources to release; defined out-of-line to anchor the vtable.
OmniMetricsAssemblerAdapter::~OmniMetricsAssemblerAdapter()
{
}
// Inspects each traversed prim and, for spheres whose radius opinion lives on
// a layer with an MPU differing from the stage MPU, returns a container
// carrying the metrics (layer MPU / stage MPU) so the scene index can correct
// the divergence. Returns nullptr when no divergence is detected.
HdContainerDataSourceHandle OmniMetricsAssemblerAdapter::GetImagingSubprimData(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
{
    if (prim.IsA<UsdGeomSphere>())
    {
        double stageMpu = 0.0;
        UsdStageRefPtr stage = prim.GetStage();
        if (!this->_GetMpuFromLayer(stage->GetRootLayer(), stageMpu))
        {
            // no explicitly authored MPU, so assume the documented
            // default value of centimeters
            // Open Issue: interesting case is when it isn't defined on
            // another layer - should we assume documented default of cm
            // or assume this means we should use the stage MPU?
            stageMpu = 0.01;
        }
        // this PoC only looks at Spheres as a simplification of a much more general problem
        // in this case, an MPU divergence is defined as layer.MPU != stage.MPU for the layer
        // containing the site of the strongest opinion of the `radius` property of the sphere
        UsdGeomSphere sphere = UsdGeomSphere(prim);
        UsdAttribute radiusAttr = sphere.GetRadiusAttr();
        // GetPropertyStack will give us the property specs for the attribute
        // in strongest to weakest order
        SdfPropertySpecHandleVector propertySpecs = radiusAttr.GetPropertyStack(UsdTimeCode::Default());
        if (propertySpecs.size() != 0)
        {
            // only need to process if there are any property specs for the attribute
            // and we only want the strongest
            // Open Issue: may need to take into account whether the property is blocked
            // which would indicate that it has no authored value
            SdfPropertySpecHandle strongestSpec = propertySpecs[0];
            SdfLayerHandle targetLayer = strongestSpec->GetLayer();
            double layerMpu = 0.0;
            if (!this->_GetMpuFromLayer(targetLayer, layerMpu))
            {
                // no explicitly authored layerMpu, so assume
                // it's in the same MPU as the stage
                return nullptr;
            }
            // are the layer MPU and stage MPU different? if so, we have a metrics divergence
            if (layerMpu != stageMpu)
            {
                // there is a divergence, we record this information
                // in a hydra data source and send that data source back
                HdDataSourceBaseHandle metricsDataSource = HdOmniMetricsSchema::Builder()
                    .SetLayerMpu(HdOmniMetricsDoubleDataSource::New(layerMpu))
                    .SetStageMpu(HdOmniMetricsDoubleDataSource::New(stageMpu))
                    .Build();
                return HdRetainedContainerDataSource::New(
                    HdOmniMetricsSchemaTokens->metrics,
                    metricsDataSource);
            }
        }
        else
        {
            // in this case, there are no authored values for the property spec
            // this one is semantically tricky, because we rely on a (potential)
            // fallback value from the schema - but we have no layer target on which
            // this is technically assigned. As such, we assume that the fallback
            // value is defined on the root layer itself.
            TF_STATUS("No property specs in the property stack for the radius attribute!");
        }
    }
    return nullptr;
}
// Reports which data source locators are invalidated for a prim when the
// given properties change. Only spheres are considered by this adapter; for
// them the transform is dirtied so the corrected xform gets recomputed.
HdDataSourceLocatorSet OmniMetricsAssemblerAdapter::InvalidateImagingSubprim(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
{
    // anything that isn't a sphere is untouched by this adapter
    if (!prim.IsA<UsdGeomSphere>())
    {
        return HdDataSourceLocatorSet();
    }
    // invalidate the prim's xform so it is re-pulled (and re-corrected)
    static const HdDataSourceLocatorSet xformLocators {
        HdXformSchema::GetDefaultLocator()
    };
    return xformLocators;
}
bool OmniMetricsAssemblerAdapter::_GetMpuFromLayer(const SdfLayerHandle& layer, double& mpu)
{
SdfDataRefPtr metadata = layer->GetMetadata();
VtValue mpuValue;
if (metadata->Has(SdfPath::AbsoluteRootPath(), UsdGeomTokens->metersPerUnit, &mpuValue))
{
mpu = mpuValue.Get<double>();
}
else
{
TF_WARN("Unable to retrieve MPU metadata from layer!");
return false;
}
return true;
}
PXR_NAMESPACE_CLOSE_SCOPE | 5,659 | C++ | 36.986577 | 104 | 0.661778 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndex.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_SCENE_INDEX_H_
#define OMNI_METRICS_SCENE_INDEX_H_
#include <pxr/pxr.h>
#include <pxr/usd/sdf/pathTable.h>
#include <pxr/imaging/hd/filteringSceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniMetricsSceneIndex);
///
/// \class OmniMetricsSceneIndex
///
/// A scene index responsible for observing an input flattened scene
/// index and producing a comparable scene in which metrics correctives
/// have been added to the appropriate places in the scene hierarchy
/// to correct for metrics divergences.
///
/// Note that with Render Delegate 2.0 and the ability to pull data
/// from a non-flattened scene, this implementation will have to be
/// revisited to work with the unflattened xform representation of
/// the hydra prims.
///
class OmniMetricsSceneIndex : public HdSingleInputFilteringSceneIndexBase
{
public:
    OMNIMETRICSASSEMBLER_API
    static OmniMetricsSceneIndexRefPtr New(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs = nullptr);
    OMNIMETRICSASSEMBLER_API
    ~OmniMetricsSceneIndex() override;
    // returns the (possibly metrics-wrapped) prim at the given path
    OMNIMETRICSASSEMBLER_API
    HdSceneIndexPrim GetPrim(const SdfPath& primPath) const override;
    // passthrough of the input scene's hierarchy
    OMNIMETRICSASSEMBLER_API
    SdfPathVector GetChildPrimPaths(const SdfPath& primPath) const override;
protected:
    OmniMetricsSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs);
    // these three are provided by HdSingleInputFilteringSceneIndexBase
    // and must be overridden by inheritors
    virtual void _PrimsAdded(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::AddedPrimEntries& entries) override;
    virtual void _PrimsRemoved(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::RemovedPrimEntries& entries) override;
    virtual void _PrimsDirtied(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::DirtiedPrimEntries& entries) override;
private:
    // propagates dirtiness down the hierarchy for cached corrected xforms
    void _DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators, HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries);
    // wraps the prim at primPath (and all descendants) with metrics data sources
    void _WrapPrimsRecursively(const SdfPath& primPath);
private:
    // wraps all prims in the scene with a metrics data source
    SdfPathTable<HdSceneIndexPrim> _wrappedPrims;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 2,972 | C | 33.97647 | 146 | 0.773217 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsAdapter.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_ASSEMBLER_ADAPTER_H_
#define OMNI_METRICS_ASSEMBLER_ADAPTER_H_
#include <pxr/pxr.h>
#include <pxr/usdImaging/usdImaging/apiSchemaAdapter.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
///
/// \class OmniMetricsAssemblerAdapter
///
/// Stage scene index adapter which has the opportunity to evaluate each
/// prim in a scene to determine if the prim has divergent metrics present.
///
/// We use a trick here that a null adapter, while deriving from UsdImagingAPISchemaAdapter
/// gets a call for each USD prim traversed in the scene by the stage scene index. These
/// are known as "keyless adapters" and are supported by the UsdImagingAdapterRegistry.
///
class OmniMetricsAssemblerAdapter : public UsdImagingAPISchemaAdapter
{
public:
    OMNIMETRICSASSEMBLER_API
    ~OmniMetricsAssemblerAdapter() override;
    using BaseAdapter = UsdImagingAPISchemaAdapter;
    // returns a metrics container for prims with an MPU divergence, else null
    OMNIMETRICSASSEMBLER_API
    HdContainerDataSourceHandle GetImagingSubprimData(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const UsdImagingDataSourceStageGlobals& stageGlobals
    ) override;
    // reports which locators are dirtied when properties change on a prim
    OMNIMETRICSASSEMBLER_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const TfTokenVector& properties
    ) override;
private:
    ///
    /// Retrieves the MPU value from the layer and returns it in mpu.
    ///
    /// Returns true if the MPU value was able to be retrieved from the layer
    /// and false otherwise.
    ///
    bool _GetMpuFromLayer(const SdfLayerHandle& layer, double& mpu);
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_METRICS_ASSEMBLER_ADAPTER_H_ | 2,348 | C | 31.178082 | 91 | 0.739779 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/xformSchema.h>
#include "metricsDataSource.h"
#include "metricsSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniMetricsDataSourceTokens,
HDOMNIMETRICSDATASOURCE_TOKENS);
// Wraps an existing container data source for the prim at primPath; the
// scene index reference is retained to walk parents during xform correction.
HdOmniMetricsDataSource::HdOmniMetricsDataSource(const HdSceneIndexBase& index, const SdfPath& primPath,
    HdContainerDataSourceHandle wrappedDataSource) :
    _sceneIndex(index),
    _primPath(primPath),
    _wrappedDataSource(wrappedDataSource)
{
}
// Swaps in a new underlying data source (e.g. after the input prim changes).
// Note: does not clear the cached corrected xform; IsPrimDirtied handles that.
void HdOmniMetricsDataSource::UpdateWrappedDataSource(
    HdContainerDataSourceHandle wrappedDataSource)
{
    _wrappedDataSource = wrappedDataSource;
}
// Reports every name the wrapped data source supports, plus the extra
// `metricsPreservedXform` token this wrapper adds. The extra token gives
// access to the original (uncorrected) flattened xform, which is needed when
// computing a child's xform against an already-corrected parent.
TfTokenVector HdOmniMetricsDataSource::GetNames()
{
    TfTokenVector names;
    if (_wrappedDataSource != nullptr)
    {
        names = _wrappedDataSource->GetNames();
    }
    names.push_back(HdOmniMetricsDataSourceTokens->metricsPreservedXform);
    return names;
}
// Resolves a named entry: intercepts the xform token to return the
// metrics-corrected transform, exposes the original transform under the
// metricsPreservedXform token, and defers everything else to the wrapped
// data source.
HdDataSourceBaseHandle HdOmniMetricsDataSource::Get(const TfToken& name)
{
    if (name == HdXformSchemaTokens->xform)
    {
        // this is an intercept of the flattened transform matrix
        // we need to (potentially) compute a metrics-corrected
        // flattened transform matrix
        return this->_ComputeCorrectedXform();
    }
    else if (name == HdOmniMetricsDataSourceTokens->metricsPreservedXform)
    {
        // this would be the original flattened matrix of the wrapped data source
        if (_wrappedDataSource != nullptr)
        {
            return _wrappedDataSource->Get(HdXformSchemaTokens->xform);
        }
    }
    // all other token values are deferred to the wrapped data source (if any)
    if (_wrappedDataSource != nullptr)
    {
        return _wrappedDataSource->Get(name);
    }
    return nullptr;
}
// Returns true (and drops the cached corrected xform) when the given dirty
// locators intersect the xform schema and a cached value exists; otherwise
// false, meaning this wrapper has nothing to recompute.
bool HdOmniMetricsDataSource::IsPrimDirtied(const HdDataSourceLocatorSet& locators)
{
    static const HdContainerDataSourceHandle containerNull(nullptr);
    if (locators.Intersects(HdXformSchema::GetDefaultLocator()))
    {
        // atomically clear the cache so the next Get() recomputes the xform
        if (HdContainerDataSource::AtomicLoad(_computedCorrectedXformDataSource) != nullptr)
        {
            HdContainerDataSource::AtomicStore(_computedCorrectedXformDataSource, containerNull);
            return true;
        }
    }
    return false;
}
// Computes (and caches in _computedCorrectedXformDataSource) the corrected
// flattened xform container for the wrapped prim. The matrix is produced by
// a _MetricsCorrectedMatrixDataSource; the resetXformStack flag is forwarded
// from the wrapped data source unchanged.
HdDataSourceBaseHandle HdOmniMetricsDataSource::_ComputeCorrectedXform()
{
    // there are two cases to consider on the underlying wrapped data source:
    // 1. The wrapped data source has metrics information.
    //    This means that the adapter determined there was a metrics
    //    divergence in the layers for the stage and the strongest
    //    opinionated layer for the xformOpOrder attribute.  In this case
    //    it means that we have to correct the divergence directly by
    //    computing a new flattened local transform for the hydra prim
    // 2. The wrapped data source does not have metrics information.
    //    This means that either the underlying prim has no Xformable data
    //    at all or that there was no metrics divergence detected.
    //    However, it could be the child of a divergent prim, and since
    //    all xforms have been flattened by the flattening scene index
    //    prior to us wrapping the data, we need to compute a new flattened
    //    matrix that takes into account the changes on the parent.
    //
    // the tricky thing is the dirtying associated with the cached data -
    // computing whether a prim with divergence changed directly is easy
    // but that change also invalidates the children (recusrively)
    // if we have already cached the value, and the cache is valid
    // return the computed cached value rather than recompute it
    HdContainerDataSourceHandle computedCorrectedXformDataSource =
        HdContainerDataSource::AtomicLoad(_computedCorrectedXformDataSource);
    if (computedCorrectedXformDataSource != nullptr)
    {
        return computedCorrectedXformDataSource;
    }
    if (this->_HasMetricsInformation(_wrappedDataSource))
    {
        // in this case, we need the parent's flattened transform to recover
        // the original local transform of the prim, once we have the original
        // local transform we can apply the corrective as the last xformOp
        // then reflatten by multiplying the parent transform again
        SdfPath parentPath = _primPath.GetParentPath();
        HdSceneIndexPrim parentPrim = _sceneIndex.GetPrim(parentPath);
        // true => apply the MPU corrective while reflattening
        computedCorrectedXformDataSource = HdXformSchema::Builder()
            .SetMatrix(HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::New(
                _wrappedDataSource, parentPrim.dataSource, true))
            .SetResetXformStack(this->_GetInputResetXformStackSource())
            .Build();
    }
    else
    {
        HdContainerDataSourceHandle metricsDataSource = nullptr;
        if (_primPath == SdfPath::AbsoluteRootPath())
        {
            // just directly get whatever the absolute root path has
            computedCorrectedXformDataSource = HdContainerDataSource::Cast(_wrappedDataSource->Get(HdXformSchemaTokens->xform));
        }
        else
        {
            // walk ancestors looking for the nearest metrics-divergent prim
            for(SdfPath p = _primPath.GetParentPath(); p != SdfPath::AbsoluteRootPath(); p = p.GetParentPath())
            {
                HdSceneIndexPrim prim = _sceneIndex.GetPrim(p);
                if (this->_HasMetricsInformation(prim.dataSource))
                {
                    // a parent along the chain did have a metrics
                    // corrected xform, so we will need to recompute
                    metricsDataSource = prim.dataSource;
                    break;
                }
            }
            if (metricsDataSource != nullptr)
            {
                // compute a new flattened xform from the parent
                // (false => no local corrective, just reflatten)
                SdfPath parentPath = _primPath.GetParentPath();
                HdSceneIndexPrim parentPrim = _sceneIndex.GetPrim(parentPath);
                computedCorrectedXformDataSource = HdXformSchema::Builder()
                    .SetMatrix(HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::New(
                        _wrappedDataSource, parentPrim.dataSource, false))
                    .SetResetXformStack(this->_GetInputResetXformStackSource())
                    .Build();
            }
            else
            {
                // no parent in the chain had a metrics corrected xform
                // so the result is really just the original flattened matrix
                computedCorrectedXformDataSource = HdContainerDataSource::Cast(_wrappedDataSource->Get(HdXformSchemaTokens->xform));
            }
        }
    }
    // cache the data source we intend to use
    HdContainerDataSource::AtomicStore(_computedCorrectedXformDataSource, computedCorrectedXformDataSource);
    return computedCorrectedXformDataSource;
}
// A prim carries metrics data iff the metrics schema resolves on its
// container data source.
bool HdOmniMetricsDataSource::_HasMetricsInformation(HdContainerDataSourceHandle handle)
{
    return HdOmniMetricsSchema::GetFromParent(handle).IsDefined();
}
// Forwards the wrapped prim's resetXformStack flag; null when there is no
// wrapped data source (or when the flag is absent / not a bool source).
HdBoolDataSourceHandle HdOmniMetricsDataSource::_GetInputResetXformStackSource()
{
    if (_wrappedDataSource == nullptr)
    {
        return nullptr;
    }
    HdDataSourceBaseHandle flagSource =
        _wrappedDataSource->Get(HdXformSchemaTokens->resetXformStack);
    return HdBoolDataSource::Cast(flagSource);
}
// Wraps a flattened matrix data source so it can apply (or propagate) a
// metrics (MPU) corrective on top of the wrapped prim's transform.
//
// inputDataSource: the wrapped prim-level data source being corrected
// parentDataSource: the parent prim's data source, used to recover the
//     original local transform from the flattened one
// isMetricsCorrectiveSource: true when this prim itself diverged and needs
//     the MPU corrective applied; false when only an ancestor changed and
//     the child merely needs reflattening against the new parent matrix
HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_MetricsCorrectedMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource,
    HdContainerDataSourceHandle parentDataSource,
    bool isMetricsCorrectiveSource) :
    _inputDataSource(inputDataSource),
    _parentDataSource(parentDataSource),
    _isMetricsCorrectiveSource(isMetricsCorrectiveSource)
{
}
// Boxes the typed matrix value for the untyped sampled-data-source interface.
VtValue HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::GetValue(Time shutterOffset)
{
    const GfMatrix4d corrected = this->GetTypedValue(shutterOffset);
    return VtValue(corrected);
}
// Computes the corrected flattened matrix for this sample time.
GfMatrix4d HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return _ComputeCorrectedMatrix(shutterOffset);
}
// Merges the contributing sample times of both matrices feeding the
// correction: the wrapped prim's matrix and the parent's matrix.
bool HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle contributingSources[] = {
        this->_GetInputMatrixDataSource(),
        this->_GetParentMatrixDataSource(),
    };
    const size_t sourceCount = TfArraySize(contributingSources);
    return HdGetMergedContributingSampleTimesForInterval(
        sourceCount, contributingSources, startTime, endTime, outSampleTimes);
}
// Returns the parent matrix to divide out when recovering this prim's local
// transform: prefers the parent's preserved (pre-correction) xform published
// under metricsPreservedXform, falling back to the parent's regular
// flattened matrix whenever no preserved copy (or no metrics wrapper) exists.
HdMatrixDataSourceHandle HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetParentMetricsPreservedMatrixDataSource() const
{
    HdOmniMetricsDataSourceHandle metricsDataSource = HdOmniMetricsDataSource::Cast(_parentDataSource);
    if (metricsDataSource != nullptr)
    {
        // the parent is wrapped; ask it for the preserved xform container
        HdContainerDataSourceHandle xformDataSource =
            HdContainerDataSource::Cast(
                metricsDataSource->Get(HdOmniMetricsDataSourceTokens->metricsPreservedXform));
        if (xformDataSource == nullptr)
        {
            return this->_GetParentMatrixDataSource();
        }
        HdMatrixDataSourceHandle matrixDataSource = HdMatrixDataSource::Cast(
            xformDataSource->Get(HdXformSchemaTokens->matrix));
        if (matrixDataSource == nullptr)
        {
            // preserved container exists but has no matrix - warn and fall back
            TF_WARN("Xform schema not defined on preserved container data source!");
        }
        return (matrixDataSource != nullptr) ? matrixDataSource : this->_GetParentMatrixDataSource();
    }
    // if it didn't have metrics information attached
    // just get the original matrix
    return this->_GetParentMatrixDataSource();
}
// Resolves the parent's (flattened) matrix via the xform schema; null when
// the parent has no xform defined.
HdMatrixDataSourceHandle HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetParentMatrixDataSource() const
{
    const HdXformSchema parentXform = HdXformSchema::GetFromParent(_parentDataSource);
    return parentXform.IsDefined() ? parentXform.GetMatrix() : nullptr;
}
// The wrapped prim's flattened matrix (may be null if it has no xform schema).
HdMatrixDataSourceHandle HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetInputMatrixDataSource() const
{
    HdXformSchema inputXform = HdXformSchema::GetFromParent(_inputDataSource);
    return inputXform.GetMatrix();
}
// Recovers the prim's original local transform from the flattened one,
// optionally applies the MPU corrective, and reflattens against the parent.
// NOTE(review): inputMatrixDataSource is dereferenced without a null check
// below - confirm the wrapped prim always has an xform matrix when this
// data source is constructed.
GfMatrix4d HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_ComputeCorrectedMatrix(Time shutterOffset)
{
    // since we are dealing with flattened transformations, we have to recover
    // the local transform of the input data source in question
    // we can do this by knowing the prim's flattened transform
    // and the original transform of its parent (the _parentDataSource)
    // Let FT be the flattened transform, P be the transform of the parent,
    // and LT be the child's local transform.  The flattened transform would
    // then have been computed as FT = (P)(LT), thus to recover LT we divide
    // out by P, which results in LT = (FT) / (P) = FT * (P)^-1
    // so we need the inverse of the original parent transform
    HdMatrixDataSourceHandle parentPreservedMatrixDataSource = this->_GetParentMetricsPreservedMatrixDataSource();
    HdMatrixDataSourceHandle parentMatrixDataSource = this->_GetParentMatrixDataSource();
    HdMatrixDataSourceHandle inputMatrixDataSource = this->_GetInputMatrixDataSource();
    GfMatrix4d parentMatrix = (parentPreservedMatrixDataSource != nullptr) ?
        parentPreservedMatrixDataSource->GetTypedValue(shutterOffset) :
        GfMatrix4d(1.0);
    GfMatrix4d currentFlattenedTransform = inputMatrixDataSource->GetTypedValue(shutterOffset);
    GfMatrix4d inverseParentMatrix = parentMatrix.GetInverse();
    // LT = FT * P^-1 (see derivation above)
    GfMatrix4d originalLocalTransform = currentFlattenedTransform * inverseParentMatrix;
    // do we need to apply a corrective?
    if (_isMetricsCorrectiveSource)
    {
        // this is a computation requiring a new metrics corrected local
        // transform computed from the original data rather than the
        // flattened transform already there
        GfMatrix4d mpuCorrective = this->_GetMpuCorrective();
        GfMatrix4d correctedTransform = originalLocalTransform * mpuCorrective;
        // now apply the parent transform to get the new flattened child transform
        // (this local parentMatrix intentionally shadows the preserved one
        // above - here we want the parent's *current* matrix, not the
        // preserved pre-correction copy)
        GfMatrix4d parentMatrix = (parentMatrixDataSource != nullptr) ?
            parentMatrixDataSource->GetTypedValue(shutterOffset) :
            GfMatrix4d(1.0);
        return parentMatrix * correctedTransform;
    }
    else
    {
        // no local corrective necessary, just reconcatenate with the new parent
        // transform to form the final new flattened child
        GfMatrix4d parentUpdatedMatrix = (parentMatrixDataSource != nullptr) ?
            parentMatrixDataSource->GetTypedValue(shutterOffset) :
            GfMatrix4d(1.0);
        return parentUpdatedMatrix * originalLocalTransform;
    }
}
// Builds a uniform scale matrix representing the layer-to-stage
// metersPerUnit ratio; identity when the prim carries no metrics schema.
// NOTE(review): GetLayerMpu()/GetStageMpu() results are dereferenced without
// null checks, and a zero stage MPU would divide by zero - confirm the
// schema guarantees both values whenever IsDefined() is true.
GfMatrix4d HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetMpuCorrective()
{
    // retrieve the layer and stage MPU values from the wrapped prim
    HdOmniMetricsSchema metricsSchema = HdOmniMetricsSchema::GetFromParent(_inputDataSource);
    if (!metricsSchema.IsDefined())
    {
        TF_WARN("MPU divergency was detected but data source has no metrics information!");
        return GfMatrix4d(1.0);
    }
    double mpuCorrectiveValue = metricsSchema.GetLayerMpu()->GetTypedValue(0.0) /
        metricsSchema.GetStageMpu()->GetTypedValue(0.0);
    GfMatrix4d uniformScaleTransform(1.0);
    uniformScaleTransform.SetScale(mpuCorrectiveValue);
    return uniformScaleTransform;
}
PXR_NAMESPACE_CLOSE_SCOPE | 14,468 | C++ | 38.859504 | 135 | 0.709773 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndex.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/work/utils.h>
#include <pxr/imaging/hd/xformSchema.h>
#include "metricsSceneIndex.h"
#include "metricsDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Factory: constructs a ref-counted metrics scene index over the given input.
OmniMetricsSceneIndexRefPtr OmniMetricsSceneIndex::New(
    const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs)
{
    OmniMetricsSceneIndex* instance = new OmniMetricsSceneIndex(inputSceneIndex, inputArgs);
    return TfCreateRefPtr(instance);
}
// Builds the scene index and eagerly wraps every prim already present in the
// input scene so each has a metrics-aware data source.
// NOTE(review): inputArgs is accepted but unused by this implementation.
OmniMetricsSceneIndex::OmniMetricsSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs) :
    HdSingleInputFilteringSceneIndexBase(inputSceneIndex)
{
    _WrapPrimsRecursively(SdfPath::AbsoluteRootPath());
}
OmniMetricsSceneIndex::~OmniMetricsSceneIndex() = default;
// Returns the wrapped prim recorded for primPath; falls back to the input
// scene (defensively - every prim should normally be wrapped).
HdSceneIndexPrim OmniMetricsSceneIndex::GetPrim(const SdfPath &primPath) const
{
    const auto found = _wrappedPrims.find(primPath);
    if (found != _wrappedPrims.end())
    {
        return found->second;
    }
    return _GetInputSceneIndex()->GetPrim(primPath);
}
// This filter never alters topology, so the input scene's answer stands.
SdfPathVector OmniMetricsSceneIndex::GetChildPrimPaths(const SdfPath& primPath) const
{
    return _GetInputSceneIndex()->GetChildPrimPaths(primPath);
}
// Observer callback: wraps each newly added prim's data source (or updates
// the existing wrapper's wrapped source when the prim was already present,
// e.g. on resync) before forwarding the notice downstream.
// NOTE(review): dirtyEntries is declared but never populated here - the
// "Open Issue" comment below explains that descendant invalidation on
// mid-hierarchy insertion is deliberately deferred.
void OmniMetricsSceneIndex::_PrimsAdded(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::AddedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for(const HdSceneIndexObserver::AddedPrimEntry& entry : entries)
    {
        HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(entry.primPath);
        HdContainerDataSourceHandle dataSource = sceneIndexPrim.dataSource;
        // attempt to insert a wrapped version for this prim
        auto it = _wrappedPrims.insert(
            {
                entry.primPath,
                HdSceneIndexPrim()
            });
        // get a reference to the inserted prim
        // this will be the existing one if insertion failed
        HdSceneIndexPrim &prim = it.first->second;
        prim.primType = entry.primType;
        // if the wrapper does exist, we have to update the data source
        if (prim.dataSource != nullptr)
        {
            HdOmniMetricsDataSource::Cast(prim.dataSource)->UpdateWrappedDataSource(dataSource);
        }
        else
        {
            // new insertion, so it wasn't wrapped previously
            // wrap the data source here
            prim.dataSource = HdOmniMetricsDataSource::New(*this, entry.primPath, dataSource);
        }
        // if this was a new insertion in the middle of the hieararchy
        // we need to invalidate descendent flattened attributes
        if (!it.second)
        {
            // Open Issue: we don't handle this here, because it's just a PoC
            // looking at spheres, but in general, we would need to build a set
            // containing the locators we are interested in (at minimum this would
            // be the transform of the prim itself, HdXformSchemaTokens->xform)
            // and make sure the entire prim hierarchy is dirtied if the data source
            // associated is dirtied based on that locator
            // since this likely requires a plug-in system to solve metrics assembly
            // generically, we defer this to a more general solution
        }
    }
    // forward on the notification
    this->_SendPrimsAdded(entries);
    // also, if we had to dirty entries because of an insertion in the middle
    // of the stage hierarchy, send those along too
    if (!dirtyEntries.empty())
    {
        this->_SendPrimsDirtied(dirtyEntries);
    }
}
// Observer callback: drops wrapped data sources for removed prims. Removing
// the absolute root clears the whole table; otherwise the subtree rooted at
// the removed path is torn down (data sources destroyed asynchronously) and
// erased before the notice is forwarded.
void OmniMetricsSceneIndex::_PrimsRemoved(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::RemovedPrimEntries& entries)
{
    for (const HdSceneIndexObserver::RemovedPrimEntry& entry : entries)
    {
        if (entry.primPath.IsAbsoluteRootPath())
        {
            // removing the whole scene
            _wrappedPrims.ClearInParallel();
            TfReset(_wrappedPrims);
        }
        else
        {
            auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(entry.primPath);
            for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second; it++)
            {
                // defer destruction of the (potentially large) data source
                WorkSwapDestroyAsync(it->second.dataSource);
            }
            // NOTE(review): erasing the subtree root appears to rely on
            // SdfPathTable::erase removing the entire subtree - confirm.
            if(startEndRangeIterator.first != startEndRangeIterator.second)
            {
                _wrappedPrims.erase(startEndRangeIterator.first);
            }
        }
    }
    _SendPrimsRemoved(entries);
}
// Observer callback: forwards the incoming dirty notices, and when an xform
// was dirtied, additionally invalidates cached corrected xforms down the
// affected subtree, emitting extra dirty notices for descendants whose
// caches were cleared.
void OmniMetricsSceneIndex::_PrimsDirtied(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::DirtiedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for (const HdSceneIndexObserver::DirtiedPrimEntry& entry : entries)
    {
        HdDataSourceLocatorSet locators;
        if (entry.dirtyLocators.Intersects(HdXformSchema::GetDefaultLocator()))
        {
            locators.insert(HdXformSchema::GetDefaultLocator());
        }
        // Open Issue: what about the radius locator? we would need that, but
        // it depends on where our scene index resides - it may already have
        // been converted by the ImplicitSceneIndex into a mesh (and it's hard
        // to know where exactly our scene index will be inserted)
        // we don't solve it here because a general metrics assembler wouldn't
        // be considering spheres only, so we defer that to a more general solution
        if (!locators.IsEmpty())
        {
            this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
        }
    }
    _SendPrimsDirtied(entries);
    // send the extra notices generated by cache invalidation (if any)
    if (!dirtyEntries.empty())
    {
        _SendPrimsDirtied(dirtyEntries);
    }
}
// Walks the subtree rooted at primPath and invalidates the cached corrected
// xform on each wrapped prim whose data intersects the given locators.
// Every path other than the root whose cache was actually cleared is
// appended to dirtyEntries so observers can be notified.
void OmniMetricsSceneIndex::_DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators,
    HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries)
{
    // find subtree range retrieves a start end pair of children
    // in the subtree of the given prim path
    auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(primPath);
    for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second;)
    {
        HdOmniMetricsDataSourceHandle dataSource = HdOmniMetricsDataSource::Cast(it->second.dataSource);
        if (dataSource != nullptr)
        {
            if (dataSource->IsPrimDirtied(locators))
            {
                // the root itself is already in the caller's notice list
                if (it->first != primPath)
                {
                    dirtyEntries->emplace_back(it->first, locators);
                }
                ++it;
            }
            else
            {
                // nothing cached here, so descendants cannot hold stale
                // values derived from it - skip the whole subtree
                it = it.GetNextSubtree();
            }
        }
        else
        {
            // BUG FIX: was `it = it++;`, which assigns the pre-increment
            // iterator back to `it` and therefore never advances,
            // producing an infinite loop whenever the cast yields null
            ++it;
        }
    }
}
void OmniMetricsSceneIndex::_WrapPrimsRecursively(const SdfPath& primPath)
{
HdSceneIndexPrim prim = this->_GetInputSceneIndex()->GetPrim(primPath);
HdOmniMetricsDataSourceHandle wrappedDataSource = HdOmniMetricsDataSource::New(*this, primPath, prim.dataSource);
_wrappedPrims.insert(
{
primPath,
HdSceneIndexPrim
{
prim.primType,
std::move(wrappedDataSource)
}
}
);
for (const SdfPath& childPath : this->_GetInputSceneIndex()->GetChildPrimPaths(primPath))
{
this->_WrapPrimsRecursively(childPath);
}
}
PXR_NAMESPACE_CLOSE_SCOPE | 8,306 | C++ | 34.5 | 117 | 0.663255 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpPythonModule.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_PYTHON_MODULE_H
#define OMNI_WARP_SCENE_INDEX_WARP_PYTHON_MODULE_H
#include <string>
#include <pxr/pxr.h>
#include <pxr/base/tf/declarePtrs.h>
#include <pxr/base/vt/value.h>
#include <pxr/imaging/hd/meshSchema.h>
#include <pxr/usdImaging/usdImaging/stageSceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniWarpPythonModule);
///
/// \class OmniWarpPythonModule
///
/// Bridges a Hydra prim to a named Warp python module: forwards mesh or
/// particle initialization data into python and invokes the module's
/// simulation step, returning updated point positions on each ExecSim call.
/// All simulation semantics live in the python module named at construction.
///
class OmniWarpPythonModule
{
public:
    // primPath: prim being simulated; moduleName: python module to invoke;
    // usdImagingSi: stage scene index handed to the python side (held, not owned)
    OmniWarpPythonModule(const SdfPath &primPath, const std::string& moduleName,
        UsdImagingStageSceneIndexConstRefPtr usdImagingSi);
    ~OmniWarpPythonModule();
    // Mesh topology plus optional dependent-prim geometry and sim parameters.
    void InitMesh(VtIntArray indices, VtVec3fArray vertices,
        VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams);
    // Particle positions plus optional dependent-prim geometry and sim parameters.
    void InitParticles(VtVec3fArray positions,
        VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams);
    // Advance one simulation step; the overload passes current dependent
    // vertices through to the python module.
    VtVec3fArray ExecSim(VtDictionary simParams);
    VtVec3fArray ExecSim(VtDictionary simParams, VtVec3fArray dependentVertices);
private:
    std::string _moduleName;
    SdfPath _primPath;
    UsdImagingStageSceneIndexConstRefPtr _usdImagingSi;
};
using OmniWarpPythonModuleSharedPtr = std::shared_ptr<class OmniWarpPythonModule>;
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_WARP_SCENE_INDEX_WARP_PYTHON_MODULE_H | 1,956 | C | 29.107692 | 82 | 0.75818 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpSceneIndexPlugin.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/imaging/hd/sceneIndexPluginRegistry.h>
#include <pxr/imaging/hio/glslfx.h>
#include "warpSceneIndexPlugin.h"
#include "warpSceneIndex.h"
PXR_NAMESPACE_OPEN_SCOPE
// Private token naming this plugin for the scene index plugin registry.
TF_DEFINE_PRIVATE_TOKENS(
    _tokens,
    ((sceneIndexPluginName, "Omni_WarpSceneIndexPlugin")));
// Renderer display name the scene index is registered against
// (presumably the GL/Storm renderer - confirm against the renderer plugin).
static const char* const _pluginDisplayName = "GL";
// Registers the plugin type with the Tf type system.
TF_REGISTRY_FUNCTION(TfType)
{
    HdSceneIndexPluginRegistry::Define<
        Omni_WarpSceneIndexPlugin>();
}
// Registers the scene index for the "GL" renderer at the start of
// insertion phase 0.
TF_REGISTRY_FUNCTION(HdSceneIndexPlugin)
{
    const HdSceneIndexPluginRegistry::InsertionPhase insertionPhase = 0;
    HdSceneIndexPluginRegistry::GetInstance().RegisterSceneIndexForRenderer(
        _pluginDisplayName, _tokens->sceneIndexPluginName, nullptr,
        insertionPhase, HdSceneIndexPluginRegistry::InsertionOrderAtStart);
}
Omni_WarpSceneIndexPlugin::
Omni_WarpSceneIndexPlugin() = default;
Omni_WarpSceneIndexPlugin::
~Omni_WarpSceneIndexPlugin() = default;
// Plugin hook: wraps the incoming scene index chain with OmniWarpSceneIndex.
// inputArgs is deliberately unused.
HdSceneIndexBaseRefPtr
Omni_WarpSceneIndexPlugin::_AppendSceneIndex(
    const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs)
{
    TF_UNUSED(inputArgs);
    return OmniWarpSceneIndex::New(
        inputSceneIndex);
}
PXR_NAMESPACE_CLOSE_SCOPE
| 1,807 | C++ | 28.16129 | 76 | 0.767571 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_SCHEMA_H
#define OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_SCHEMA_H
#include <pxr/imaging/hd/schema.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define OMNIWARPCOMPUTATION_SCHEMA_TOKENS \
(warpComputation) \
(sourceFile) \
(dependentPrims) \
(simulationParams) \
TF_DECLARE_PUBLIC_TOKENS(OmniWarpComputationSchemaTokens, OMNIWARPSCENEINDEX_API,
OMNIWARPCOMPUTATION_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
/// \class OmniWarpComputationSchema
///
/// Schema view over the "warpComputation" container on a Hydra prim:
/// the Warp python source file, the prim paths the simulation depends on,
/// and an opaque dictionary of simulation parameters.
class OmniWarpComputationSchema : public HdSchema
{
public:
    OmniWarpComputationSchema(HdContainerDataSourceHandle container)
    : HdSchema(container) {}
    //ACCESSORS
    OMNIWARPSCENEINDEX_API
    HdStringDataSourceHandle GetSourceFile();
    OMNIWARPSCENEINDEX_API
    HdPathArrayDataSourceHandle GetDependentPrims();
    OMNIWARPSCENEINDEX_API
    HdSampledDataSourceHandle GetSimulationParams();
    // RETRIEVING AND CONSTRUCTING
    /// Builds a container data source which includes the provided child data
    /// sources. Parameters with nullptr values are excluded. This is a
    /// low-level interface. For cases in which it's desired to define
    /// the container with a sparse set of child fields, the Builder class
    /// is often more convenient and readable.
    OMNIWARPSCENEINDEX_API
    static HdContainerDataSourceHandle
    BuildRetained(
        const HdStringDataSourceHandle &sourceFile,
        const HdPathArrayDataSourceHandle &dependentPrims,
        const HdSampledDataSourceHandle &simulationParams
    );
    /// \class OmniWarpComputationSchema::Builder
    ///
    /// Utility class for setting sparse sets of child data source fields to be
    /// filled as arguments into BuildRetained. Because all setter methods
    /// return a reference to the instance, this can be used in the "builder
    /// pattern" form.
    class Builder
    {
    public:
        OMNIWARPSCENEINDEX_API
        Builder &SetSourceFile(
            const HdStringDataSourceHandle &sourceFile);
        OMNIWARPSCENEINDEX_API
        Builder &SetDependentPrims(
            const HdPathArrayDataSourceHandle &dependentPrims);
        // FIX: export macro added for consistency with the sibling setters;
        // without it the symbol is not exported across library boundaries.
        OMNIWARPSCENEINDEX_API
        Builder &SetSimulationParams(
            const HdSampledDataSourceHandle &simulationParams);
        /// Returns a container data source containing the members set thus far.
        OMNIWARPSCENEINDEX_API
        HdContainerDataSourceHandle Build();
    private:
        HdStringDataSourceHandle _sourceFile;
        HdPathArrayDataSourceHandle _dependentPrims;
        HdSampledDataSourceHandle _simulationParams;
    };
    /// Retrieves a container data source with the schema's default name token
    /// "warpComputation" from the parent container and constructs a
    /// OmniWarpComputationSchema instance.
    /// Because the requested container data source may not exist, the result
    /// should be checked with IsDefined() or a bool comparison before use.
    OMNIWARPSCENEINDEX_API
    static OmniWarpComputationSchema GetFromParent(
        const HdContainerDataSourceHandle &fromParentContainer);
    /// Returns a token where the container representing this schema is found in
    /// a container by default.
    OMNIWARPSCENEINDEX_API
    static const TfToken &GetSchemaToken();
    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the container representing this schema is found by default.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetDefaultLocator();
    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the source file can be found.
    /// This is often useful for checking intersection against the
    /// HdDataSourceLocatorSet sent with HdDataSourceObserver::PrimsDirtied.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetSourceFileLocator();
    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the dependent prims.
    /// This is often useful for checking intersection against the
    /// HdDataSourceLocatorSet sent with HdDataSourceObserver::PrimsDirtied.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetDependentPrimsLocator();
    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the simulation params can be found.
    /// This is often useful for checking intersection against the
    /// HdDataSourceLocatorSet sent with HdDataSourceObserver::PrimsDirtied.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetSimulationParamsLocator();
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 5,329 | C | 36.013889 | 81 | 0.719835 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationAPIAdapter.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_API_ADAPTER_H
#define OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_API_ADAPTER_H
#include <pxr/usdImaging/usdImaging/apiSchemaAdapter.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
/// API-schema adapter that contributes Hydra data sources (and invalidation
/// locators) for prims carrying the warp computation applied schema. The
/// two InvalidateImagingSubprim overloads track the signature change in
/// USD 23.08 (extra UsdImagingPropertyInvalidationType parameter).
class WarpComputationAPIAdapter : public UsdImagingAPISchemaAdapter
{
public:
    using BaseAdapter = UsdImagingAPISchemaAdapter;
    // Returns this schema's data source contribution for the given subprim.
    OMNIWARPSCENEINDEX_API
    HdContainerDataSourceHandle GetImagingSubprimData(
            UsdPrim const& prim,
            TfToken const& subprim,
            TfToken const& appliedInstanceName,
            const UsdImagingDataSourceStageGlobals &stageGlobals) override;
#if PXR_VERSION < 2308
    // Maps changed USD properties to the Hydra locators they invalidate.
    OMNIWARPSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
            UsdPrim const& prim,
            TfToken const& subprim,
            TfToken const& appliedInstanceName,
            TfTokenVector const& properties) override;
#else
    OMNIWARPSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
            UsdPrim const& prim,
            TfToken const& subprim,
            TfToken const& appliedInstanceName,
            TfTokenVector const& properties,
            UsdImagingPropertyInvalidationType invalidationType) override;
#endif
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_API_ADAPTER_H
| 1,927 | C | 31.677966 | 75 | 0.732226 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpSceneIndex.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <string>
#include <pxr/base/tf/pyInvoke.h>
#include <pxr/base/tf/errorMark.h>
#include <pxr/base/tf/pyExceptionState.h>
#include <pxr/base/tf/pyInterpreter.h>
#include <pxr/imaging/hd/primvarSchema.h>
#include <pxr/imaging/hd/primvarsSchema.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/imaging/hd/tokens.h>
#include <pxr/imaging/hd/meshSchema.h>
#include "pxr/imaging/hd/instancerTopologySchema.h"
#include "warpSceneIndex.h"
#include "tokens.h"
#ifdef PXR_PYTHON_SUPPORT_ENABLED
#include <pxr/base/tf/pyInterpreter.h>
#endif // PXR_PYTHON_SUPPORT_ENABLED
PXR_NAMESPACE_OPEN_SCOPE
// Extracts the simulationParams dictionary from a prim-level data source's
// warpComputation container; returns an empty dictionary when absent or
// when the stored value is not a VtDictionary.
static VtDictionary
GetSimulationParams(HdContainerDataSourceHandle ds)
{
    VtDictionary result;
    HdContainerDataSourceHandle warpContainer = HdContainerDataSource::Cast(
        ds->Get(OmniWarpComputationSchemaTokens->warpComputation));
    if (!warpContainer)
    {
        return result;
    }
    const TfTokenVector names = warpContainer->GetNames();
    const bool hasParams = std::find(names.begin(), names.end(),
        OmniWarpComputationSchemaTokens->simulationParams) != names.end();
    if (!hasParams)
    {
        return result;
    }
    OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(ds);
    if (warpSchema)
    {
        const VtValue metaData = warpSchema.GetSimulationParams()->GetValue(0);
        if (metaData.IsHolding<VtDictionary>())
        {
            result = metaData.UncheckedGet<VtDictionary>();
        }
    }
    return result;
}
// Depth-first search through a graph of scene indices for the UsdImaging
// stage scene index; returns a null ref-pointer when none is found.
static UsdImagingStageSceneIndexRefPtr
FindUsdImagingSceneIndex(const std::vector<HdSceneIndexBaseRefPtr>& inputScenes)
{
    for (const HdSceneIndexBaseRefPtr& sceneIdx : inputScenes)
    {
        // direct hit: this node is the stage scene index itself
        UsdImagingStageSceneIndexRefPtr imagingSi =
            TfDynamic_cast<UsdImagingStageSceneIndexRefPtr>(sceneIdx);
        if (imagingSi)
        {
            return imagingSi;
        }
        // otherwise descend into filtering scene indices
        HdFilteringSceneIndexBaseRefPtr filteringSi =
            TfDynamic_cast<HdFilteringSceneIndexBaseRefPtr>(sceneIdx);
        if (filteringSi)
        {
            UsdImagingStageSceneIndexRefPtr nested =
                FindUsdImagingSceneIndex(filteringSi->GetInputScenes());
            if (nested)
            {
                return nested;
            }
        }
    }
    return UsdImagingStageSceneIndexRefPtr();
}
// Factory: constructs a ref-counted warp scene index over the given input.
OmniWarpSceneIndexRefPtr
OmniWarpSceneIndex::New(
    const HdSceneIndexBaseRefPtr &inputSceneIndex)
{
    OmniWarpSceneIndex* sceneIndex = new OmniWarpSceneIndex(inputSceneIndex);
    return TfCreateRefPtr(sceneIndex);
}
// Forwards the input scene index to the single-input filtering base; the
// constructor performs no additional setup.
OmniWarpSceneIndex::OmniWarpSceneIndex(
    const HdSceneIndexBaseRefPtr &inputSceneIndex)
    : HdSingleInputFilteringSceneIndexBase(inputSceneIndex)
{
}
/// A convenience data source implementing the primvar schema from
/// a triple of primvar value, interpolation and role. The latter two
/// are given as tokens. The value can be given either as data source
/// or as thunk returning a data source which is evaluated on each
/// Get.
class _PrimvarDataSource final : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_PrimvarDataSource);
    // The three standard primvar-schema fields this container exposes.
    TfTokenVector GetNames() override {
        return {HdPrimvarSchemaTokens->primvarValue,
                HdPrimvarSchemaTokens->interpolation,
                HdPrimvarSchemaTokens->role};
    }
    // Dispatches each schema field: the stored value source is returned
    // as-is; interpolation and role are built from the stored tokens.
    HdDataSourceBaseHandle Get(const TfToken &name) override {
        if (name == HdPrimvarSchemaTokens->primvarValue) {
            return _primvarValueSrc;
        }
        if (name == HdPrimvarSchemaTokens->interpolation) {
            return
                HdPrimvarSchema::BuildInterpolationDataSource(
                    _interpolation);
        }
        if (name == HdPrimvarSchemaTokens->role) {
            return
                HdPrimvarSchema::BuildRoleDataSource(
                    _role);
        }
        return nullptr;
    }
private:
    // Construction goes through the New() generated by HD_DECLARE_DATASOURCE.
    _PrimvarDataSource(
        const HdDataSourceBaseHandle &primvarValueSrc,
        const TfToken &interpolation,
        const TfToken &role)
      : _primvarValueSrc(primvarValueSrc)
      , _interpolation(interpolation)
      , _role(role)
    {
    }
    HdDataSourceBaseHandle _primvarValueSrc;   // the primvar's value source
    TfToken _interpolation;                    // e.g. vertex, constant
    TfToken _role;                             // e.g. point, normal
};
/// Sampled data source producing simulated point positions for a mesh.
/// Each evaluation forwards the current simulation parameters (and, when a
/// dependent prim supplies a points primvar, its current vertices) to the
/// Warp python module and returns the resulting positions.
class _PointsDataSource : public HdVec3fArrayDataSource
{
public:
    HD_DECLARE_DATASOURCE(_PointsDataSource);
    VtValue GetValue(const Time shutterOffset) override {
        return VtValue(GetTypedValue(shutterOffset));
    }
    VtVec3fArray GetTypedValue(const Time shutterOffset) override
    {
        // if a dependent prim exposes a points primvar, pass its current
        // vertices into the simulation step
        HdPrimvarsSchema depPrimVarsSchema = HdPrimvarsSchema::GetFromParent(_depDs);
        if (depPrimVarsSchema)
        {
            HdPrimvarSchema depPrimVar = depPrimVarsSchema.GetPrimvar(HdTokens->points);
            if (depPrimVar)
            {
                HdSampledDataSourceHandle valueDataSource = depPrimVar.GetPrimvarValue();
                auto pointsVt = valueDataSource->GetValue(0.f);
                VtVec3fArray pointsArray = pointsVt.UncheckedGet<VtArray<GfVec3f>>();
                return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs), pointsArray);
            }
        }
        return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs));
    }
    bool GetContributingSampleTimesForInterval(
        const Time startTime,
        const Time endTime,
        std::vector<Time> * const outSampleTimes) override
    {
        // no sub-frame samples are published for simulated output
        return false;
    }
private:
    // NOTE(review): _schema is a reference member - the referenced schema
    // must outlive this data source; confirm at the construction site.
    _PointsDataSource(HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &simParamsDataSource,
        const HdContainerDataSourceHandle &depDataSource)
        : _schema(primVarSchema),
          _pythonModule(pythonModule),
          // FIX: initializers reordered to match declaration order below
          // (silences -Wreorder); each still initializes from its own argument
          _depDs(depDataSource),
          _simParamsDs(simParamsDataSource)
    {
    }
    HdPrimvarsSchema& _schema;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle const _depDs;
    HdContainerDataSourceHandle const _simParamsDs;
};
// Sampled data source that computes an instancer's per-instance positions by
// running the prim's Warp python simulation, optionally seeded with the
// points of a dependent mesh prim.
class _InstancePositionsDataSource : public HdVec3fArrayDataSource
{
public:
    HD_DECLARE_DATASOURCE(_InstancePositionsDataSource);

    VtValue GetValue(const Time shutterOffset) override {
        return VtValue(GetTypedValue(shutterOffset));
    }

    VtVec3fArray GetTypedValue(const Time shutterOffset) override
    {
        // Feed the dependent prim's points into the simulation when present.
        HdPrimvarsSchema depPrimVarsSchema = HdPrimvarsSchema::GetFromParent(_depDs);
        if (depPrimVarsSchema)
        {
            HdPrimvarSchema depPrimVar = depPrimVarsSchema.GetPrimvar(HdTokens->points);
            if (depPrimVar)
            {
                HdSampledDataSourceHandle valueDataSource = depPrimVar.GetPrimvarValue();
                auto pointsVt = valueDataSource->GetValue(0.f);
                VtVec3fArray pointsArray = pointsVt.UncheckedGet<VtArray<GfVec3f>>();
                return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs), pointsArray);
            }
        }
        return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs));
    }

    bool GetContributingSampleTimesForInterval(
        const Time startTime,
        const Time endTime,
        std::vector<Time> * const outSampleTimes) override
    {
        // Single sample only; no motion-blur sample times contributed.
        return false;
    }

private:
    _InstancePositionsDataSource(HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource,
        const HdContainerDataSourceHandle &simParamsDataSource)
    : _schema(primVarSchema),
      _pythonModule(pythonModule),
      _depDs(depDataSource),
      _simParamsDs(simParamsDataSource)
    {
    }

    // NOTE(review): as in _PointsDataSource, _schema is an unused reference
    // member bound to a caller-owned schema -- confirm its lifetime or drop it.
    HdPrimvarsSchema& _schema;
    HdContainerDataSourceHandle _depDs;
    HdContainerDataSourceHandle _simParamsDs;
    OmniWarpPythonModuleSharedPtr _pythonModule;
};
// Primvars container wrapper for a warp-simulated mesh: all primvars pass
// through to the underlying container except "points", which is replaced by
// a _PointsDataSource driven by the Warp python module.
class _MeshPrimVarsOverrideDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_MeshPrimVarsOverrideDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        if (name == HdTokens->points)
        {
            // Substitute the simulated points as a vertex/point primvar.
            return _PrimvarDataSource::New(
                _PointsDataSource::New(_schema, _pythonModule, _simParamsDs, _depDs),
                HdPrimvarSchemaTokens->vertex,
                HdPrimvarSchemaTokens->point);
        }
        // Everything else is forwarded untouched.
        HdDataSourceBaseHandle result = _inputDs->Get(name);
        return result;
    }

private:
    _MeshPrimVarsOverrideDataSource(const HdContainerDataSourceHandle &primDataSource,
        HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &simParamsDataSource,
        const HdContainerDataSourceHandle &depDataSource)
    : _schema(primVarSchema),
      _pythonModule(pythonModule),
      _inputDs(primDataSource),
      _simParamsDs(simParamsDataSource),
      _depDs(depDataSource)
    {
    }

    HdPrimvarsSchema _schema;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle const _depDs;
    HdContainerDataSourceHandle const _inputDs;
    HdContainerDataSourceHandle const _simParamsDs;
};
// Primvars container wrapper for a warp-simulated instancer: all primvars
// pass through except the instancer "translate" primvar, which is replaced
// by an _InstancePositionsDataSource driven by the Warp python module.
class _InstancerPrimVarsOverrideDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_InstancerPrimVarsOverrideDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        if (name == HdInstancerTokens->translate)
        {
            // Substitute the simulated per-instance translations.
            return _PrimvarDataSource::New(
                _InstancePositionsDataSource::New(_schema, _pythonModule, _depDs, _simParamsDs),
                HdPrimvarSchemaTokens->instance,
                HdPrimvarRoleTokens->vector);
        }
        // Everything else is forwarded untouched.
        HdDataSourceBaseHandle result = _inputDs->Get(name);
        return result;
    }

private:
    _InstancerPrimVarsOverrideDataSource(const HdContainerDataSourceHandle &primDataSource,
        HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource,
        const HdContainerDataSourceHandle &simParamsDataSource)
    : _schema(primVarSchema),
      _pythonModule(pythonModule),
      _inputDs(primDataSource),
      _depDs(depDataSource),
      _simParamsDs(simParamsDataSource)
    {
    }

    HdPrimvarsSchema _schema;
    HdContainerDataSourceHandle _depDs;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle const _inputDs;
    HdContainerDataSourceHandle const _simParamsDs;
};
// Container data source wrapping a warp-simulated mesh prim.  Forwards all
// queries to the underlying prim data source, but substitutes a
// _MeshPrimVarsOverrideDataSource for the primvars container so the "points"
// primvar is produced by the Warp simulation.
class _WarpMeshDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_WarpMeshDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        // We append our token for the WarpMesh python file token
        // We do our init for indices here. Only on reload?
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        auto result = _inputDs->Get(name);
        if (name == HdPrimvarsSchemaTokens->primvars)
        {
            auto primVarSchema = HdPrimvarsSchema::GetFromParent(_inputDs);
            if (auto primVarContainer = HdContainerDataSource::Cast(result))
            {
                // Wrap the primvars container so "points" comes from warp.
                return _MeshPrimVarsOverrideDataSource::New(primVarContainer, primVarSchema, _pythonModule, _inputDs, _depDs);
            }
        }
        return result;
    }

private:
    _WarpMeshDataSource(const SdfPath& primPath,
        const HdContainerDataSourceHandle &primDataSource,
        OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource)
    : _primPath(primPath),
      _inputDs(primDataSource),
      _pythonModule(pythonModule),
      _depDs(depDataSource)
    {
    }

    HdContainerDataSourceHandle _depDs;
    HdContainerDataSourceHandle _inputDs;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    // Fixed: store the path by value.  The previous 'const SdfPath&' member
    // bound to the caller's argument and dangled once this ref-counted data
    // source outlived OmniWarpSceneIndex::GetPrim's stack frame.
    SdfPath _primPath;
};
// Container data source wrapping a warp-simulated point instancer.  Forwards
// all queries to the underlying prim data source, but substitutes an
// _InstancerPrimVarsOverrideDataSource for the primvars container so the
// instance translations are produced by the Warp simulation.
class _WarpInstancerDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_WarpInstancerDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        // We append our token for the WarpMesh python file token
        // We do our init for indices here. Only on reload?
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        auto result = _inputDs->Get(name);
        if (name == HdPrimvarsSchemaTokens->primvars)
        {
            auto primVarSchema = HdPrimvarsSchema::GetFromParent(_inputDs);
            if (auto primVarContainer = HdContainerDataSource::Cast(result))
            {
                // Wrap the primvars container so "translate" comes from warp.
                return _InstancerPrimVarsOverrideDataSource::New(primVarContainer, primVarSchema, _pythonModule, _depDs, _simParamsDs);
            }
        }
        return result;
    }

private:
    _WarpInstancerDataSource(const SdfPath& primPath,
        const HdContainerDataSourceHandle &primDataSource,
        OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource,
        const HdContainerDataSourceHandle &simParamsDataSource)
    : _primPath(primPath),
      _inputDs(primDataSource),
      _pythonModule(pythonModule),
      _depDs(depDataSource),
      _simParamsDs(simParamsDataSource)
    {
    }

    HdContainerDataSourceHandle _inputDs;
    HdContainerDataSourceHandle _depDs;
    HdContainerDataSourceHandle _simParamsDs;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    // Fixed: store the path by value.  The previous 'const SdfPath&' member
    // bound to the caller's argument and dangled once this ref-counted data
    // source outlived OmniWarpSceneIndex::GetPrim's stack frame.
    SdfPath _primPath;
};
// Returns the input scene's prim, wrapping its data source when the prim (or,
// for instancers, one of its prototypes) carries the warp computation schema,
// so that points / instance translations are produced by the Warp simulation.
HdSceneIndexPrim
OmniWarpSceneIndex::GetPrim(const SdfPath& primPath) const
{
    HdSceneIndexPrim prim = _GetInputSceneIndex()->GetPrim(primPath);
    if (prim.primType == HdPrimTypeTokens->mesh && prim.dataSource)
    {
        if (OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(prim.dataSource))
        {
            // Resolve the (at most one) dependent prim's data source, if any.
            HdContainerDataSourceHandle _depDs;
            if (HdPathArrayDataSourceHandle dependentsDs = warpSchema.GetDependentPrims())
            {
                VtArray<SdfPath> dependentPrims = dependentsDs->GetTypedValue(0);
                if (dependentPrims.size())
                {
                    auto depPrim = _GetInputSceneIndex()->GetPrim(dependentPrims[0]);
                    if (depPrim.dataSource)
                    {
                        _depDs = depPrim.dataSource;
                    }
                }
            }
            prim.dataSource = _WarpMeshDataSource::New(
                primPath, prim.dataSource, GetWarpPythonModule(primPath), _depDs);
        }
    }
    else if (prim.primType == HdPrimTypeTokens->instancer && prim.dataSource)
    {
        // For instancers the warp schema lives on a prototype prim; wrap the
        // instancer when any prototype carries it.
        HdInstancerTopologySchema topologySchema = HdInstancerTopologySchema::GetFromParent(prim.dataSource);
        if (HdPathArrayDataSourceHandle const ds = topologySchema.GetPrototypes())
        {
            auto protoTypes = ds->GetTypedValue(0.0f);
            for (size_t i = 0; i < protoTypes.size(); ++i)
            {
                auto protoPrim = _GetInputSceneIndex()->GetPrim(protoTypes[i]);
                OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(protoPrim.dataSource);
                if (warpSchema)
                {
                    // Look for particles to be dependent on a mesh
                    HdContainerDataSourceHandle _depDs;
                    if (HdPathArrayDataSourceHandle dependentsDs = warpSchema.GetDependentPrims())
                    {
                        VtArray<SdfPath> dependentPrims = dependentsDs->GetTypedValue(0);
                        if (dependentPrims.size())
                        {
                            auto depPrim = _GetInputSceneIndex()->GetPrim(dependentPrims[0]);
                            if (depPrim.dataSource)
                            {
                                _depDs = depPrim.dataSource;
                            }
                        }
                    }
                    // NOTE(review): only the first matching prototype wraps
                    // the instancer; the loop continues but re-wraps on each
                    // further match -- confirm that multiple warp prototypes
                    // per instancer are not expected.
                    prim.dataSource = _WarpInstancerDataSource::New(
                        primPath, prim.dataSource, GetWarpPythonModule(primPath), _depDs, protoPrim.dataSource);
                }
            }
        }
    }
    return prim;
}
// Child paths are not altered by this filter; delegate to the input scene.
SdfPathVector
OmniWarpSceneIndex::GetChildPrimPaths(const SdfPath &primPath) const
{
    return _GetInputSceneIndex()->GetChildPrimPaths(primPath);
}
// Observer callback: when a mesh or instancer carrying the warp computation
// schema is added, create its per-prim Warp python module before forwarding
// the notice downstream.
void OmniWarpSceneIndex::_PrimsAdded(
    const HdSceneIndexBase &sender,
    const HdSceneIndexObserver::AddedPrimEntries &entries)
{
    if (!_IsObserved()) {
        return;
    }
    for (const HdSceneIndexObserver::AddedPrimEntry& entry : entries)
    {
        if (entry.primType == HdPrimTypeTokens->mesh)
        {
            auto prim = _GetInputSceneIndex()->GetPrim(entry.primPath);
            HdMeshSchema meshSchema = HdMeshSchema::GetFromParent(prim.dataSource)
            HdPrimvarsSchema primVarsSchema = HdPrimvarsSchema::GetFromParent(prim.dataSource);
            OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(prim.dataSource);
            if (meshSchema && warpSchema && primVarsSchema)
            {
                assert(GetWarpPythonModule(entry.primPath) == nullptr);
                HdMeshTopologySchema meshTopologySchema = meshSchema.GetTopology();
                UsdImagingStageSceneIndexRefPtr usdImagingSi;
                if (auto filteringIdx = dynamic_cast<HdFilteringSceneIndexBase const*>(&sender))
                {
                    // SceneIndexPlugins do not have access to the current stage/frame time.
                    // Only the UsdImagingStageSceneIndex has this. We store this for each Mesh,
                    // nullptr is a valid value. If valid, warp simulation can use the exact
                    // stage time. If null, the warp has to emulate frame time
                    usdImagingSi = FindUsdImagingSceneIndex(filteringIdx->GetInputScenes());
                }
                auto vtSimParams = GetSimulationParams(prim.dataSource);
                HdPrimvarSchema origPoints = primVarsSchema.GetPrimvar(HdTokens->points);
                CreateWarpPythonModule(entry.primPath, warpSchema, meshTopologySchema, origPoints, usdImagingSi, vtSimParams);
            }
        }
        else if (entry.primType == HdPrimTypeTokens->instancer)
        {
            auto prim = _GetInputSceneIndex()->GetPrim(entry.primPath)
            HdPrimvarsSchema primVarSchema = HdPrimvarsSchema::GetFromParent(prim.dataSource);
            HdInstancerTopologySchema topologySchema = HdInstancerTopologySchema::GetFromParent(prim.dataSource);
            HdPathArrayDataSourceHandle const ds = topologySchema.GetPrototypes();
            if (primVarSchema && ds)
            {
                auto protoTypes = ds->GetTypedValue(0.0f);
                for (size_t i = 0; i < protoTypes.size(); ++i)
                {
                    auto protoPrim = _GetInputSceneIndex()->GetPrim(protoTypes[i]);
                    if (protoPrim.primType == TfToken())
                    {
                        continue;
                    }
                    OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(protoPrim.dataSource);
                    if (warpSchema)
                    {
                        assert(GetWarpPythonModule(entry.primPath) == nullptr);
                        UsdImagingStageSceneIndexRefPtr usdImagingSi;
                        if (auto filteringIdx = dynamic_cast<HdFilteringSceneIndexBase const*>(&sender))
                        {
                            // SceneIndexPlugins do not have access to the current stage/frame time.
                            // Only the UsdImagingStageSceneIndex has this. We store this for each Mesh,
                            // nullptr is a valid value. If valid, warp simulation can use the exact
                            // stage time. If null, the warp has to emulate frame time
                            usdImagingSi = FindUsdImagingSceneIndex(filteringIdx->GetInputScenes());
                        }
                        auto vtSimParams = GetSimulationParams(protoPrim.dataSource);
                        HdPrimvarSchema positionsPos = primVarSchema.GetPrimvar(HdInstancerTokens->translate);
                        CreateWarpPythonModule(entry.primPath, warpSchema, positionsPos, usdImagingSi, vtSimParams);
                        // Only the first warp-enabled prototype is used.
                        break;
                    }
                }
            }
        }
    }
    _SendPrimsAdded(entries);
    return;
}
// Observer callback: drop the python module of any prim at or below a
// removed path, then forward the notice downstream.
void
OmniWarpSceneIndex::_PrimsRemoved(
    const HdSceneIndexBase &sender,
    const HdSceneIndexObserver::RemovedPrimEntries &entries)
{
    if (!_IsObserved()) {
        return;
    }
    // Erase-while-iterating: erase() returns the next valid iterator, so we
    // only advance manually when no entry matched the current module.
    _WarpPythonModuleMap::iterator it = _pythonModuleMap.begin();
    while (it != _pythonModuleMap.end())
    {
        bool bErased = false;
        for (const HdSceneIndexObserver::RemovedPrimEntry& entry : entries)
        {
            if (it->first.HasPrefix(entry.primPath))
            {
                bErased = true;
                it = _pythonModuleMap.erase(it);
                break;
            }
        }
        if (!bErased)
        {
            it++;
        }
    }
    _SendPrimsRemoved(entries);
}
// Observer callback: if a dirtied locator touches a prim's point primvars or
// its warp source-file attribute, discard that prim's python module so it is
// re-created on next access; then forward the notice downstream.
void
OmniWarpSceneIndex::_PrimsDirtied(
    const HdSceneIndexBase &sender,
    const HdSceneIndexObserver::DirtiedPrimEntries &entries)
{
    if (!_IsObserved()) {
        return;
    }
    // +++ Not sure this is the right locator for points data
    static const HdDataSourceLocatorSet pointDeformLocators
    {
        HdPrimvarsSchema::GetDefaultLocator().Append(
            HdPrimvarSchemaTokens->point),
        OmniWarpComputationSchema::GetDefaultLocator().Append(
            OmniWarpComputationSchema::GetSourceFileLocator())
    };
    // If mesh original points or python module path changes
    // remove our _pythonModule for this prim and allow
    // it to be re-created
    //+++ Multithreaded access to _pythonModuleMap
    _WarpPythonModuleMap::iterator it = _pythonModuleMap.begin();
    while (it != _pythonModuleMap.end())
    {
        bool bErased = false;
        for (const HdSceneIndexObserver::DirtiedPrimEntry &entry : entries)
        {
            if (it->first.HasPrefix(entry.primPath))
            {
                if (pointDeformLocators.Intersects(entry.dirtyLocators))
                {
                    bErased = true;
                    it = _pythonModuleMap.erase(it);
                    break;
                }
            }
        }
        if (!bErased)
        {
            it++;
        }
    }
    _SendPrimsDirtied(entries);
}
// Returns the python module registered for primPath, or an empty handle when
// no module has been created for that prim yet.
//+++ Multithreaded access to _pythonModuleMap
OmniWarpPythonModuleSharedPtr
OmniWarpSceneIndex::GetWarpPythonModule(const SdfPath &primPath) const
{
    const auto moduleIt = _pythonModuleMap.find(primPath);
    return (moduleIt == _pythonModuleMap.end())
        ? OmniWarpPythonModuleSharedPtr(nullptr)
        : moduleIt->second;
}
// Creates (or replaces) the python module for a warp-simulated mesh prim,
// initializing it with the mesh's topology, rest points, optional dependent
// mesh data, and the simulation parameter dictionary.
OmniWarpPythonModuleSharedPtr
OmniWarpSceneIndex::CreateWarpPythonModule(const SdfPath &primPath,
    OmniWarpComputationSchema& warpSchema,
    HdMeshTopologySchema& topologySchema,
    HdPrimvarSchema& primVarSchema,
    UsdImagingStageSceneIndexRefPtr usdImagingSi,
    VtDictionary vtSimParams)
{
    //+++ Multithreaded access to _pythonModuleMap
    std::string moduleName = warpSchema.GetSourceFile()->GetTypedValue(0);
    HdIntArrayDataSourceHandle faceIndicesDs = topologySchema.GetFaceVertexIndices();
    VtIntArray indices = faceIndicesDs->GetTypedValue(0.f);
    HdSampledDataSourceHandle valueDataSource = primVarSchema.GetPrimvarValue();
    auto pointsVt = valueDataSource->GetValue(0.f);
    VtVec3fArray pointsArray = pointsVt.UncheckedGet<VtArray<GfVec3f>>();
    // Force terminate of old module
    _pythonModuleMap[primPath] = nullptr;
    OmniWarpPythonModuleSharedPtr pythonModule =
        std::make_shared<OmniWarpPythonModule>(primPath, moduleName, usdImagingSi);
    VtIntArray depIndices;
    VtVec3fArray depPointsArray;
    GetDependentMeshData(warpSchema, depIndices, depPointsArray);
    pythonModule->InitMesh(indices, pointsArray, depIndices, depPointsArray, vtSimParams);
    _pythonModuleMap[primPath] = pythonModule;
    // NOTE(review): equivalent to returning pythonModule directly.
    return _pythonModuleMap.find(primPath)->second;
}
// Creates (or replaces) the python module for a warp-simulated instancer
// (particles), initializing it with the instance positions, optional
// dependent mesh data, and the simulation parameter dictionary.
OmniWarpPythonModuleSharedPtr
OmniWarpSceneIndex::CreateWarpPythonModule(const SdfPath &primPath,
    OmniWarpComputationSchema& warpSchema,
    HdPrimvarSchema& primVarSchema,
    UsdImagingStageSceneIndexRefPtr usdImagingSi,
    VtDictionary vtSimParams)
{
    //+++ Multithreaded access to _pythonModuleMap
    std::string moduleName = warpSchema.GetSourceFile()->GetTypedValue(0);
    // Force terminate of old module
    _pythonModuleMap[primPath] = nullptr;
    HdSampledDataSourceHandle valueDataSource = primVarSchema.GetPrimvarValue();
    auto positionsVt = valueDataSource->GetValue(0.f);
    VtVec3fArray positionsArray = positionsVt.UncheckedGet<VtArray<GfVec3f>>();
    OmniWarpPythonModuleSharedPtr pythonModule =
        std::make_shared<OmniWarpPythonModule>(primPath, moduleName, usdImagingSi);
    VtIntArray indices;
    VtVec3fArray pointsArray;
    GetDependentMeshData(warpSchema, indices, pointsArray);
    pythonModule->InitParticles(positionsArray, indices, pointsArray, vtSimParams);
    _pythonModuleMap[primPath] = pythonModule;
    // NOTE(review): equivalent to returning pythonModule directly.
    return _pythonModuleMap.find(primPath)->second;
}
// Fetches the points and face-vertex indices of the first prim listed in the
// warp schema's dependentPrims relationship.  Outputs are left empty when
// there is no dependent prim or it lacks the corresponding data.
void
OmniWarpSceneIndex::GetDependentMeshData(OmniWarpComputationSchema warpSchema, VtIntArray& outIndices, VtVec3fArray& outVertices)
{
    VtArray<SdfPath> dependentPrims;
    if (HdPathArrayDataSourceHandle dependentsDs = warpSchema.GetDependentPrims())
    {
        dependentPrims = dependentsDs->GetTypedValue(0);
    }
    if (!dependentPrims.size())
    {
        return;
    }
    //+++ Only support a single dependent prim
    auto depPrim = _GetInputSceneIndex()->GetPrim(dependentPrims[0]);
    if (depPrim.dataSource)
    {
        // Points primvar -> outVertices.
        HdPrimvarsSchema depPrimVarsSchema = HdPrimvarsSchema::GetFromParent(depPrim.dataSource);
        if (depPrimVarsSchema)
        {
            HdPrimvarSchema depPrimVar = depPrimVarsSchema.GetPrimvar(HdTokens->points);
            if (depPrimVar)
            {
                HdSampledDataSourceHandle valueDataSource = depPrimVar.GetPrimvarValue();
                auto pointsVt = valueDataSource->GetValue(0.f);
                outVertices = pointsVt.UncheckedGet<VtArray<GfVec3f>>();
            }
        }
        // Mesh topology -> outIndices.
        HdMeshSchema meshSchema = HdMeshSchema::GetFromParent(depPrim.dataSource);
        if (meshSchema)
        {
            HdMeshTopologySchema topologySchema = meshSchema.GetTopology();
            HdIntArrayDataSourceHandle faceIndicesDs = topologySchema.GetFaceVertexIndices();
            outIndices = faceIndicesDs->GetTypedValue(0.f);
        }
    }
}
PXR_NAMESPACE_CLOSE_SCOPE | 27,828 | C++ | 34.496173 | 135 | 0.650496 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/__init__.py | from pxr import Tf
# PreparePythonModule didn't make its way into USD
# until 21.08 - older versions import the module
# manually and call PrepareModule
# Use the modern helper when available; otherwise import the compiled
# extension module manually and prepare it into this package's namespace.
if hasattr(Tf, "PreparePythonModule"):
    Tf.PreparePythonModule()
else:
    from . import _omniWarpSceneIndex
    Tf.PrepareModule(_omniWarpSceneIndex, locals())
del Tf | 327 | Python | 24.230767 | 51 | 0.75841 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/trace/trace.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include "warpComputationSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Instantiate the schema tokens declared in warpComputationSchema.h.
TF_DEFINE_PUBLIC_TOKENS(OmniWarpComputationSchemaTokens,
    OMNIWARPCOMPUTATION_SCHEMA_TOKENS);
// Data source for the python source-file path of the computation.
HdStringDataSourceHandle
OmniWarpComputationSchema::GetSourceFile()
{
    return _GetTypedDataSource<HdStringDataSource>(
        OmniWarpComputationSchemaTokens->sourceFile);
}
// Data source for the paths of prims this computation depends on.
HdPathArrayDataSourceHandle
OmniWarpComputationSchema::GetDependentPrims()
{
    return _GetTypedDataSource<HdPathArrayDataSource>(
        OmniWarpComputationSchemaTokens->dependentPrims);
}
// Data source for the simulation parameter dictionary.
HdSampledDataSourceHandle
OmniWarpComputationSchema::GetSimulationParams()
{
    return _GetTypedDataSource<HdSampledDataSource>(
        OmniWarpComputationSchemaTokens->simulationParams);
}
// Builds a retained container holding whichever of the three schema fields
// are supplied (null handles are skipped); arrays are sized for the maximum.
HdContainerDataSourceHandle
OmniWarpComputationSchema::BuildRetained(
    const HdStringDataSourceHandle &sourceFile,
    const HdPathArrayDataSourceHandle &dependentPrims,
    const HdSampledDataSourceHandle &simulationParams
)
{
    TfToken names[3];
    HdDataSourceBaseHandle values[3];
    size_t count = 0;
    if (sourceFile) {
        names[count] = OmniWarpComputationSchemaTokens->sourceFile;
        values[count++] = sourceFile;
    }
    if (dependentPrims) {
        names[count] = OmniWarpComputationSchemaTokens->dependentPrims;
        values[count++] = dependentPrims;
    }
    if (simulationParams) {
        names[count] = OmniWarpComputationSchemaTokens->simulationParams;
        values[count++] = simulationParams;
    }
    return HdRetainedContainerDataSource::New(count, names, values);
}
/*static*/
// Extracts the warpComputation container from a parent prim data source;
// yields a schema wrapping null when absent.
OmniWarpComputationSchema
OmniWarpComputationSchema::GetFromParent(
    const HdContainerDataSourceHandle &fromParentContainer)
{
    return OmniWarpComputationSchema(
        fromParentContainer
        ? HdContainerDataSource::Cast(fromParentContainer->Get(
                OmniWarpComputationSchemaTokens->warpComputation))
        : nullptr);
}
/*static*/
// Token under which this schema is stored on a prim data source.
const TfToken &
OmniWarpComputationSchema::GetSchemaToken()
{
    return OmniWarpComputationSchemaTokens->warpComputation;
}
/*static*/
// Locator of the whole warpComputation container.
const HdDataSourceLocator &
OmniWarpComputationSchema::GetDefaultLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation
    );
    return locator;
}
/*static*/
// Locator of the sourceFile field within the warpComputation container.
const HdDataSourceLocator &
OmniWarpComputationSchema::GetSourceFileLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation,
        OmniWarpComputationSchemaTokens->sourceFile
    );
    return locator;
}
/*static*/
// Locator of the dependentPrims field within the warpComputation container.
const HdDataSourceLocator &
OmniWarpComputationSchema::GetDependentPrimsLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation,
        OmniWarpComputationSchemaTokens->dependentPrims
    );
    return locator;
}
/*static*/
// Locator of the simulationParams field within the warpComputation container.
const HdDataSourceLocator &
OmniWarpComputationSchema::GetSimulationParamsLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation,
        OmniWarpComputationSchemaTokens->simulationParams
    );
    return locator;
}
// Builder setter for the sourceFile field; returns *this for chaining.
OmniWarpComputationSchema::Builder &
OmniWarpComputationSchema::Builder::SetSourceFile(
    const HdStringDataSourceHandle &sourceFile)
{
    _sourceFile = sourceFile;
    return *this;
}
// Builder setter for the dependentPrims field; returns *this for chaining.
// (Fixed parameter-name typo: "depdendentPrims" -> "dependentPrims".)
OmniWarpComputationSchema::Builder &
OmniWarpComputationSchema::Builder::SetDependentPrims(
    const HdPathArrayDataSourceHandle &dependentPrims)
{
    _dependentPrims = dependentPrims;
    return *this;
}
// Builder setter for the simulationParams field; returns *this for chaining.
OmniWarpComputationSchema::Builder &
OmniWarpComputationSchema::Builder::SetSimulationParams(
    const HdSampledDataSourceHandle &simulationParams)
{
    _simulationParams = simulationParams;
    return *this;
}
// Assembles the retained container from whichever fields were set.
HdContainerDataSourceHandle
OmniWarpComputationSchema::Builder::Build()
{
    return OmniWarpComputationSchema::BuildRetained(
        _sourceFile,
        _dependentPrims,
        _simulationParams
    );
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,653 | C++ | 26.702381 | 75 | 0.762089 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpSceneIndex.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_SCENE_INDEX_H
#define OMNI_WARP_SCENE_INDEX_WARP_SCENE_INDEX_H
#include <pxr/pxr.h>
#include <pxr/imaging/hd/filteringSceneIndex.h>
#include <pxr/usdImaging/usdImaging/stageSceneIndex.h>
#include "pxr/imaging/hd/primvarSchema.h"
#include "pxr/imaging/hd/meshSchema.h"
#include "api.h"
#include "warpPythonModule.h"
#include "warpComputationSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniWarpSceneIndex);
class _PointsDataSource;
class _WarpMeshDataSource;
///
/// \class OmniWarpSceneIndex
///
/// A filtering scene index that substitutes warp-simulated values for the
/// points primvar of meshes (and the translate primvar of point instancers)
/// whose prims carry the OmniWarpComputation schema, delegating the
/// computation to a per-prim python module.
///
class OmniWarpSceneIndex :
    public HdSingleInputFilteringSceneIndexBase
{
public:
    OMNIWARPSCENEINDEX_API
    static OmniWarpSceneIndexRefPtr
    New(const HdSceneIndexBaseRefPtr &inputSceneIndex);

    // Returns the input prim, wrapped to override simulated primvars when
    // the prim carries the warp computation schema.
    OMNIWARPSCENEINDEX_API
    HdSceneIndexPrim GetPrim(const SdfPath &primPath) const override;

    // Pass-through of the input scene's hierarchy.
    OMNIWARPSCENEINDEX_API
    SdfPathVector GetChildPrimPaths(const SdfPath &primPath) const override;

protected:
    OmniWarpSceneIndex(
        const HdSceneIndexBaseRefPtr &inputSceneIndex);

    // Observer notifications: create, destroy, or invalidate the per-prim
    // python modules as prims come and go or are dirtied.
    void _PrimsAdded(
        const HdSceneIndexBase &sender,
        const HdSceneIndexObserver::AddedPrimEntries &entries) override;
    void _PrimsRemoved(
        const HdSceneIndexBase &sender,
        const HdSceneIndexObserver::RemovedPrimEntries &entries) override;
    void _PrimsDirtied(
        const HdSceneIndexBase &sender,
        const HdSceneIndexObserver::DirtiedPrimEntries &entries) override;

private:
    // Lookup of an existing module (may return null).
    OmniWarpPythonModuleSharedPtr GetWarpPythonModule(const SdfPath &primPath) const;

    // Mesh variant: initialized with topology and rest points.
    OmniWarpPythonModuleSharedPtr CreateWarpPythonModule(const SdfPath &primPath,
        OmniWarpComputationSchema& warpSchema,
        HdMeshTopologySchema& topologySchema,
        HdPrimvarSchema& primVarSchema,
        UsdImagingStageSceneIndexRefPtr usdImagingSi,
        VtDictionary vtSimParams);

    // Instancer/particles variant: initialized with instance positions.
    OmniWarpPythonModuleSharedPtr CreateWarpPythonModule(const SdfPath &primPath,
        OmniWarpComputationSchema& warpSchema,
        HdPrimvarSchema& primVarSchema,
        UsdImagingStageSceneIndexRefPtr usdImagingSi,
        VtDictionary vtSimParams);

    // Points + topology of the first dependent prim, if any.
    void GetDependentMeshData(OmniWarpComputationSchema warpSchema,
        VtIntArray& outIndices,
        VtVec3fArray& outVertices);

    // Each prim with a WarpComputationAPI gets it's own Python Module instance
    typedef std::unordered_map<SdfPath, OmniWarpPythonModuleSharedPtr, SdfPath::Hash> _WarpPythonModuleMap;
    mutable _WarpPythonModuleMap _pythonModuleMap;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_WARP_SCENE_INDEX_WARP_SCENE_INDEX_H | 3,199 | C | 31.323232 | 107 | 0.760863 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/wrapTokens.cpp | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
// GENERATED FILE. DO NOT EDIT.
#include <boost/python/class.hpp>
#include ".//tokens.h"
PXR_NAMESPACE_USING_DIRECTIVE
namespace {
// Helper to return a static token as a string. We wrap tokens as Python
// strings and for some reason simply wrapping the token using def_readonly
// bypasses to-Python conversion, leading to the error that there's no
// Python type for the C++ TfToken type. So we wrap this functor instead.
// Functor exposing a static TfToken to Python as a plain string; wrapping the
// token directly via def_readonly bypasses to-Python conversion (see note
// above), so this callable is wrapped instead.
class _WrapStaticToken {
public:
    _WrapStaticToken(const TfToken* token) : _token(token) { }

    std::string operator()() const
    {
        return _token->GetString();
    }

private:
    const TfToken* _token;
};
// Adds a read-only static property named 'name' to Python class 'cls' that
// returns the token's string by value.
template <typename T>
void
_AddToken(T& cls, const char* name, const TfToken& token)
{
    cls.add_static_property(name,
        boost::python::make_function(
            _WrapStaticToken(&token),
            boost::python::return_value_policy<
                boost::python::return_by_value>(),
            boost::mpl::vector1<std::string>()));
}
} // anonymous
// Boost.Python wrapper: exposes the scene index tokens as static string
// properties on a non-instantiable 'Tokens' Python class.
void wrapOmniWarpSceneIndexTokens()
{
    boost::python::class_<OmniWarpSceneIndexTokensType, boost::noncopyable>
        cls("Tokens", boost::python::no_init);
    _AddToken(cls, "warpDependentPrims", OmniWarpSceneIndexTokens->warpDependentPrims);
    _AddToken(cls, "warpSourceFile", OmniWarpSceneIndexTokens->warpSourceFile);
    _AddToken(cls, "OmniWarpComputationAPI", OmniWarpSceneIndexTokens->OmniWarpComputationAPI);
}
| 2,626 | C++ | 35.999999 | 95 | 0.690023 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationAPIAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/tf/stringUtils.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/usdImaging/usdImaging/dataSourceAttribute.h>
#include "warpComputationAPIAdapter.h"
#include "warpComputationAPI.h"
PXR_NAMESPACE_OPEN_SCOPE
// File-private tokens naming the schema container and its fields.
TF_DEFINE_PRIVATE_TOKENS(
    _tokens,
    (warpComputation)
    (sourceFile)
    (dependentPrims)
    (simulationParams)
);
// Registers the adapter with the TfType registry so UsdImaging can discover
// it as the API-schema adapter factory.
TF_REGISTRY_FUNCTION(TfType)
{
    typedef WarpComputationAPIAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory< UsdImagingAPISchemaAdapterFactory<Adapter> >();
}
// ----------------------------------------------------------------------------
namespace
{
// Sampled data source holding a copy of the attribute's customData
// dictionary (the simulation parameters); time-invariant.
class SimulationParamsDataSource : public HdSampledDataSource
{
public:
    HD_DECLARE_DATASOURCE(SimulationParamsDataSource);

    SimulationParamsDataSource(
        const VtDictionary &dict)
    : _customData(dict)
    {
    }

    VtValue
    GetValue(Time shutterOffset)
    {
        return VtValue(_customData);
    }

    bool
    GetContributingSampleTimesForInterval(
        Time startTime,
        Time endTime,
        std::vector<Time> * outSampleTimes)
    {
        // Single sample only.
        return false;
    }

    // Copy of the source attribute's customData.
    VtDictionary _customData;
};
// Path-array data source that resolves a USD relationship's forwarded
// targets on every evaluation; time-invariant.
class DependentPrimsDataSource : public HdPathArrayDataSource
{
public:
    HD_DECLARE_DATASOURCE(DependentPrimsDataSource);

    DependentPrimsDataSource(
        const UsdRelationship &rel)
    : _usdRel(rel)
    {
    }

    VtValue
    GetValue(
        HdSampledDataSource::Time shutterOffset)
    {
        return VtValue(GetTypedValue(shutterOffset));
    }

    VtArray<SdfPath>
    GetTypedValue(
        HdSampledDataSource::Time shutterOffset)
    {
        // Resolve the relationship's fully forwarded targets each call.
        SdfPathVector paths;
        _usdRel.GetForwardedTargets(&paths);
        VtArray<SdfPath> vtPaths(paths.begin(), paths.end());
        return vtPaths;
    }

    bool
    GetContributingSampleTimesForInterval(
        HdSampledDataSource::Time startTime,
        HdSampledDataSource::Time endTime,
        std::vector<HdSampledDataSource::Time> *outSampleTimes)
    {
        // Single sample only.
        return false;
    }

private:
    UsdRelationship _usdRel;
};
HD_DECLARE_DATASOURCE_HANDLES(DependentPrimsDataSource);
// Container data source presenting the warpComputation schema fields
// (sourceFile, dependentPrims, simulationParams) read from the applied
// OmniWarpComputationAPI on a USD prim.
class _WarpComputationDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_WarpComputationDataSource);

    _WarpComputationDataSource(
        const UsdPrim &prim,
        const UsdImagingDataSourceStageGlobals &stageGlobals)
    : _api(prim)
    , _stageGlobals(stageGlobals)
    {
    }

    TfTokenVector GetNames() override
    {
        TfTokenVector result;
        // At most 4 names: warpComputation + the three optional fields.
        result.reserve(4);
        result.push_back(_tokens->warpComputation);
        if (UsdAttribute attr = _api.GetSourceFileAttr()) {
            result.push_back(_tokens->sourceFile);
            // simulationParams is only advertised when the sourceFile
            // attribute carries a non-empty customData dictionary.
            VtDictionary customData = attr.GetCustomData();
            VtDictionary::iterator iter = customData.begin();
            if (iter != customData.end())
            {
                result.push_back(_tokens->simulationParams);
            }
        }
        if (_api.GetDependentPrimsRel()) {
            result.push_back(_tokens->dependentPrims);
        }
        return result;
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override {
        if (name == _tokens->sourceFile)
        {
            if (UsdAttribute attr = _api.GetSourceFileAttr())
            {
                return UsdImagingDataSourceAttributeNew(attr, _stageGlobals);
            }
        }
        else if (name == _tokens->dependentPrims)
        {
            if (UsdRelationship rel = _api.GetDependentPrimsRel())
            {
                return DependentPrimsDataSource::New(rel);
            }
        }
        else if (name == _tokens->simulationParams)
        {
            if (UsdAttribute attr = _api.GetSourceFileAttr())
            {
                VtDictionary customData = attr.GetCustomData();
                VtDictionary::iterator iter = customData.begin();
                if (iter != customData.end())
                {
                    return SimulationParamsDataSource::New(customData);
                }
            }
        }
        return nullptr;
    }

private:
    OmniWarpSceneIndexWarpComputationAPI _api;
    const UsdImagingDataSourceStageGlobals &_stageGlobals;
};
HD_DECLARE_DATASOURCE_HANDLES(_WarpComputationDataSource);
} // anonymous namespace
// ----------------------------------------------------------------------------
// Provides the warpComputation container for prims whose applied API schema
// has a non-empty sourceFile; returns null (no contribution) otherwise.
HdContainerDataSourceHandle
WarpComputationAPIAdapter::GetImagingSubprimData(
        UsdPrim const& prim,
        TfToken const& subprim,
        TfToken const& appliedInstanceName,
        const UsdImagingDataSourceStageGlobals &stageGlobals)
{
    OmniWarpSceneIndexWarpComputationAPI _api(prim);
    std::string pythonModuleName;
    // NOTE(review): the return value of Get() is ignored and the attribute's
    // validity is not checked before the call -- presumably
    // pythonModuleName stays empty on failure; confirm.
    UsdAttribute attr = _api.GetSourceFileAttr();
    attr.Get(&pythonModuleName, 0.f);

    if (pythonModuleName.length())
    {
        return HdRetainedContainerDataSource::New(
            _tokens->warpComputation,
            _WarpComputationDataSource::New(
                prim, stageGlobals));
    }
    return nullptr;
}
// Property-invalidation hook for the applied API schema.
// USD 23.08 added the UsdImagingPropertyInvalidationType parameter, hence the
// version-switched signature below; both variants share one body.
//
// NOTE: invalidation is currently disabled (#if 0) — the adapter always
// reports nothing to invalidate. The dead code is kept as a template for a
// future implementation keyed on property-name prefixes.
#if PXR_VERSION < 2308
HdDataSourceLocatorSet
WarpComputationAPIAdapter::InvalidateImagingSubprim(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    TfTokenVector const& properties)
#else
HdDataSourceLocatorSet
WarpComputationAPIAdapter::InvalidateImagingSubprim(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    TfTokenVector const& properties,
    const UsdImagingPropertyInvalidationType invalidationType)
#endif
{
#if 0
    if (!subprim.IsEmpty() || appliedInstanceName.IsEmpty()) {
        return HdDataSourceLocatorSet();
    }
    std::string prefix = TfStringPrintf(
        "collections:%s:", appliedInstanceName.data());
    for (const TfToken &propertyName : properties) {
        if (TfStringStartsWith(propertyName.GetString(), prefix)) {
            return HdDataSourceLocator(
                _tokens->usdCollections, appliedInstanceName);
        }
    }
#endif
    return HdDataSourceLocatorSet();
}
PXR_NAMESPACE_CLOSE_SCOPE
| 6,767 | C++ | 25.4375 | 79 | 0.645338 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpPythonModule.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/tf/pyInvoke.h>
#include <pxr/base/tf/errorMark.h>
#include <pxr/base/tf/pyExceptionState.h>
#include <pxr/base/tf/pyInterpreter.h>
#include <pxr/imaging/hd/tokens.h>
#include "warpPythonModule.h"
#include "tokens.h"
PXR_NAMESPACE_OPEN_SCOPE
// Binds one prim to a named Warp python module. All sim work is delegated to
// free functions in that module (initialize_*/exec_sim/terminate_sim) via
// TfPyInvokeAndReturn; usdImagingSi is kept only to query the stage time.
OmniWarpPythonModule::OmniWarpPythonModule(const SdfPath &primPath,
    const std::string& moduleName, UsdImagingStageSceneIndexConstRefPtr usdImagingSi)
    : _primPath(primPath),
    _moduleName(moduleName),
    _usdImagingSi(usdImagingSi)
{
}
// Tells the python module to release the sim state registered for this prim.
// TfPyLock acquires the Python GIL for the call; the return value is ignored
// (best effort — a failure during teardown is not actionable here).
OmniWarpPythonModule::~OmniWarpPythonModule()
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "terminate_sim", &result, _primPath);
}
// Forwards mesh topology (and optional dependent/collider mesh + sim params)
// to the module's initialize_sim_mesh entry point. Return value is ignored.
void OmniWarpPythonModule::InitMesh(VtIntArray indices, VtVec3fArray vertices,
    VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams)
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "initialize_sim_mesh", &result, _primPath, indices, vertices,
        depIndices, depVertices, simParams);
}
// Forwards seed particle positions (and optional dependent mesh + sim params)
// to the module's initialize_sim_particles entry point. Return value ignored.
void OmniWarpPythonModule::InitParticles(
    VtVec3fArray positions, VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams)
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "initialize_sim_particles", &result,
        _primPath, positions, depIndices, depVertices, simParams);
}
// Convenience overload: run the sim with no dependent-mesh vertices.
VtVec3fArray OmniWarpPythonModule::ExecSim(VtDictionary simParams)
{
    return ExecSim(simParams, VtVec3fArray());
}
// Invokes exec_sim(primPath, dt, dependentVertices, simParams) in the bound
// python module and returns the resulting points. Returns an empty array if
// the call fails or the result is not convertible to VtVec3fArray.
VtVec3fArray OmniWarpPythonModule::ExecSim(VtDictionary simParams, VtVec3fArray dependentVertices)
{
    TfPyLock pyLock;
    boost::python::object result;
    // dt is the stage's current time code (0 when no stage scene index is
    // available); the python modules treat it as an absolute sample time,
    // not a delta, despite the name.
    float dt = 0.f;
    if (_usdImagingSi)
    {
        dt = _usdImagingSi->GetTime().GetValue();
    }
    if (TfPyInvokeAndReturn(_moduleName.c_str(), "exec_sim", &result, _primPath, dt, dependentVertices, simParams))
    {
        boost::python::extract<VtVec3fArray> theResults(result);
        if (theResults.check())
        {
            return theResults();
        }
    }
    return VtVec3fArray();
}
PXR_NAMESPACE_CLOSE_SCOPE | 2,735 | C++ | 30.090909 | 115 | 0.729068 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/particles.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import warp as wp
import warp.sim
import warp.sim.render
import numpy as np
from pxr import Vt, Sdf
wp.init()
global_examples = {}
# need radius of sphere
class Example2:
    """Warp particle-simulation state for a single prim.

    Holds the ModelBuilder, finalized model, double-buffered sim states and
    the integrator; ``update()`` advances the sim by one rendered frame.
    """
    def __init__(self):
        # Fixed-step timing: 60 fps frames, each split into 64 substeps.
        self.frame_dt = 1.0 / 60
        self.frame_count = 400
        self.sim_substeps = 64
        self.sim_dt = self.frame_dt / self.sim_substeps
        self.sim_steps = self.frame_count * self.sim_substeps
        self.sim_time = 0.0
        self.radius = 0.1
        self.builder = wp.sim.ModelBuilder()
        self.builder.default_particle_radius = self.radius
    def update(self):
        # Rebuild the spatial hash grid before stepping; cell size 2*radius
        # matches the particle contact distance.
        self.model.particle_grid.build(self.state_0.particle_q, self.radius * 2.0)
        for s in range(self.sim_substeps):
            self.state_0.clear_forces()
            self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
            # swap states: the integrator writes state_1 from state_0
            (self.state_0, self.state_1) = (self.state_1, self.state_0)
def terminate_sim(primPath: Sdf.Path):
    """Release the simulation registered for *primPath*.

    Removes the registry entry outright instead of leaving a dangling None
    value behind, so the dict does not accumulate dead keys across sim
    restarts; a missing entry is silently ignored.
    """
    global_examples.pop(primPath, None)
def initialize_sim_particles(primPath: Sdf.Path,
    src_positions: Vt.Vec3fArray, dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Create and register a particle sim for *primPath*.

    One particle is seeded per entry of src_positions; the dependent-mesh and
    sim_params arguments are accepted for interface parity but unused here.
    """
    global global_examples
    example = Example2()
    # one particle per source point, fixed initial velocity and mass
    for point in src_positions:
        example.builder.add_particle(point, (5.0, 0.0, 0.0), 0.1)
    example.model = example.builder.finalize()
    example.model.particle_kf = 25.0
    example.model.soft_contact_kd = 100.0
    example.model.soft_contact_kf *= 2.0
    example.state_0 = example.model.state()
    example.state_1 = example.model.state()
    example.integrator = wp.sim.SemiImplicitIntegrator()
    global_examples[primPath] = example
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Step the particle sim for *primPath* and return the new positions.

    NOTE: sim_dt, dep_mesh_points and sim_params are currently ignored —
    the sim advances by its own fixed internal substep each call.
    """
    global global_examples
    global_examples[primPath].update()
    return Vt.Vec3fArray.FromNumpy(global_examples[primPath].state_0.particle_q.numpy())
| 2,841 | Python | 33.658536 | 136 | 0.697994 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/cloth.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Cloth
#
# Shows a simulation of an FEM cloth model colliding against a static
# rigid body mesh using the wp.sim.ModelBuilder().
#
###########################################################################
import os
import math
import numpy as np
import warp as wp
import warp.sim
import warp.sim.render
from pxr import Usd, UsdGeom, Vt, Sdf
import sys
wp.init()
global_examples = {}
class Example:
    """FEM cloth simulation colliding against a static mesh collider.

    Builds a fixed 64x64 cloth grid pinned along its left edge plus a static
    collision mesh, then steps a semi-implicit integrator in ``update()``.
    """
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        # indices/points describe the static COLLIDER mesh; the cloth itself
        # is generated procedurally by add_cloth_grid below.
        self.sim_width = 64
        self.sim_height = 64
        self.frame_dt = 1.0 / 60
        self.frame_count = 400
        self.sim_substeps = 32
        self.sim_dt = self.frame_dt / self.sim_substeps
        self.sim_steps = self.frame_count * self.sim_substeps
        self.sim_time = 0.0
        builder = wp.sim.ModelBuilder()
        # sim BCs — raw stiffness/damping constants, scaled by globalScale
        # where they are applied below
        clothEdgeBendingStiffness = 0.01
        clothEdgeDampingStiffness = 0.0
        clothTriAreaStiffness = 1000000.0
        clothTriDampingStiffness = 100.0
        clothTriElasticStiffness = 1000000.0
        colliderContactDistance = 1.0
        colliderContactQueryRange = 100.0
        contactDampingStiffness = 10000.0
        contactElasticStiffness = 500000.0
        contactFrictionCoeff = 0.75
        contactFrictionStiffness = 10000.0
        globalScale = 0.01
        # cloth grid: rotated flat (90° about X), dropped from y=50,
        # left edge pinned so it hangs and drapes
        builder.add_cloth_grid(
            pos=(0.0, 50.0, -25.0),
            rot=wp.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.5),
            vel=(0.0, 0.0, 0.0),
            dim_x=self.sim_width,
            dim_y=self.sim_height,
            cell_x=1.0,
            cell_y=1.0,
            mass=0.1,
            fix_left=True,
            tri_ke=clothTriElasticStiffness * globalScale,
            tri_ka=clothTriAreaStiffness * globalScale,
            tri_kd=clothTriDampingStiffness * globalScale,
            edge_ke=clothEdgeBendingStiffness * globalScale,
            edge_kd=clothEdgeDampingStiffness * globalScale
        )
        # add collider (must have identity transform until we get xforms
        # piped through the Hydra plugin)
        mesh = wp.sim.Mesh(points, indices)
        builder.add_shape_mesh(
            body=-1,
            mesh=mesh,
            pos=(0.0, 0.0, 0.0),
            rot=wp.quat_identity(),
            scale=(1.0, 1.0, 1.0),
            ke=1.0e2,
            kd=1.0e2,
            kf=1.0e1,
        )
        # set sim BCs on the finalized model (gravity is in cm/s^2 —
        # consistent with the globalScale=0.01 centimeter scaling above)
        self.model = builder.finalize()
        self.model.ground = True
        self.model.allocate_soft_contacts(self.model.particle_count)
        self.model.gravity = (0, -980, 0)
        self.model.soft_contact_ke = contactElasticStiffness * globalScale
        self.model.soft_contact_kf = contactFrictionStiffness * globalScale
        self.model.soft_contact_mu = contactFrictionCoeff
        self.model.soft_contact_kd = contactDampingStiffness * globalScale
        self.model.soft_contact_margin = colliderContactDistance * colliderContactQueryRange
        self.model.particle_radius = colliderContactDistance
        self.integrator = wp.sim.SemiImplicitIntegrator()
        self.state_0 = self.model.state()
        self.state_1 = self.model.state()
    def update(self, sim_time: float):
        # Collision detection once per frame, then fixed substeps; sim_time
        # is accepted but not used by the integrator loop.
        wp.sim.collide(self.model, self.state_0)
        for s in range(self.sim_substeps):
            self.state_0.clear_forces()
            self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
            # double-buffer swap: next substep reads what this one wrote
            (self.state_0, self.state_1) = (self.state_1, self.state_0)
def terminate_sim(primPath: Sdf.Path):
    """Release the cloth sim registered for *primPath*.

    Removes the registry entry outright instead of leaving a dangling None
    value behind, so the dict does not accumulate dead keys across sim
    restarts; a missing entry is silently ignored.
    """
    global_examples.pop(primPath, None)
def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Register a cloth sim for *primPath*.

    The source mesh topology (src_indices/src_points) is intentionally
    ignored: the cloth grid is generated procedurally by Example, and the
    dependent mesh supplies the static collider.
    """
    global global_examples
    global_examples[primPath] = Example(dep_mesh_indices, dep_mesh_points)
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Advance the cloth sim one frame and return the updated particle positions.

    NOTE: sim_dt is forwarded but the Example instance steps with its own
    fixed internal substep timing; dep_mesh_points/sim_params are unused.
    """
    example = global_examples[primPath]
    example.update(sim_dt)
    return Vt.Vec3fArray.FromNumpy(example.state_0.particle_q.numpy())
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/deform01.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warp as wp
import numpy as np
from pxr import Vt, Sdf
# Warp kernel: displace each vertex along Y by a sine wave keyed to the
# vertex's X coordinate, with amplitude modulated over time t.
@wp.kernel
def deform(positions: wp.array(dtype=wp.vec3), t: float):
    tid = wp.tid()
    x = positions[tid]
    offset = -wp.sin(x[0]) * 0.06
    scale = wp.sin(t)
    # write the displaced point back in place
    x = x + wp.vec3(0.0, offset * scale, 0.0)
    positions[tid] = x
class Example:
    """Wraps a wp.Mesh whose points are deformed in-place each ``update()``."""
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        self.mesh = wp.Mesh(
            points=wp.array(points, dtype=wp.vec3),
            indices=wp.array(indices, dtype=int),
        )
    def update(self, sim_time: float):
        # One thread per vertex; the kernel mutates mesh.points directly.
        wp.launch(kernel=deform, dim=len(self.mesh.points), inputs=[self.mesh.points, sim_time])
        # refit the mesh BVH to account for the deformation
        self.mesh.refit()
wp.init()
global_examples = {}
def terminate_sim(primPath: Sdf.Path):
    """Release the deformer registered for *primPath*.

    Removes the registry entry outright instead of leaving a dangling None
    value behind, so the dict does not accumulate dead keys across sim
    restarts; a missing entry is silently ignored.
    """
    global_examples.pop(primPath, None)
def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Register a sine-wave deformer over the source mesh for *primPath*.

    The dependent-mesh and sim_params arguments are accepted for interface
    parity with the other warp modules but are unused here.
    """
    global global_examples
    global_examples[primPath] = Example(src_indices, src_points)
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Advance the deformer for *primPath* and return the deformed points."""
    global global_examples
    # Sim expects 60 samples per second (or hydra time of 1.0), so the
    # incoming hydra time code is converted to seconds here.
    global_examples[primPath].update(sim_dt / 60.0)
    return Vt.Vec3fArray.FromNumpy(global_examples[primPath].mesh.points.numpy())
def is_enabled():
    """Return True — this module is always enabled."""
    # NOTE(review): presumably queried by the host plugin to toggle the
    # module on/off; the caller is not visible from this file — confirm.
    return True
| 2,140 | Python | 31.439393 | 112 | 0.693458 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/ocean.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warp as wp
import numpy as np
from pxr import Vt, Sdf
wp.init()
sim_params_global = {
'wave_amplitude': 1.5,
'wave_directionality': 0.0,
'wind_speed': 10.0,
'water_depth': 50.0,
'scale': 1.0,
'direction': 0.0,
}
#warp function definitions
# fractional part of a (w.r.t. floor(a)); result is always in [0, 1)
@wp.func
def frac(a: float):
    return a - wp.floor(a)
# square of a (declared @wp.func so it is callable from Warp kernels)
@wp.func
def sqr(a: float):
    return a * a
# Classic alpha/beta ocean-wave spectrum form: energy density at angular
# frequency omega, with an omega^-5 tail and an exponential cutoff that
# peaks near peak_omega.
@wp.func
def alpha_beta_spectrum(omega: float,
                        peak_omega: float,
                        alpha: float,
                        beta: float,
                        gravity: float):
    return ( (alpha * gravity * gravity / wp.pow(omega, 5.0)) * wp.exp(- beta * wp.pow(peak_omega/omega, 4.0)) )
# JONSWAP peak-sharpening factor gamma^r, with spectral width sigma = 0.07
# below the peak frequency and 0.09 above it.
@wp.func
def jonswap_peak_sharpening(omega: float,
                            peak_omega: float,
                            gamma: float):
    sigma = float(0.07)
    if omega > peak_omega:
        sigma = float(0.09)
    return wp.pow(gamma, wp.exp(- 0.5 * sqr( (omega - peak_omega) / (sigma * peak_omega)) ))
# JONSWAP fetch-limited wind-sea spectrum; alpha and the peak frequency are
# derived from wind speed and fetch (fetch_km given in kilometers).
# https://wikiwaves.org/Ocean-Wave_Spectra#JONSWAP_Spectrum
@wp.func
def jonswap_spectrum(omega: float,
                     gravity: float,
                     wind_speed: float,
                     fetch_km: float,
                     gamma: float):
    fetch = 1000.0 * fetch_km
    alpha = 0.076 * wp.pow(wind_speed * wind_speed / (gravity * fetch), 0.22)
    peak_omega = 22.0 * wp.pow(wp.abs(gravity * gravity / (wind_speed * fetch)), 1.0/3.0)
    return (jonswap_peak_sharpening(omega, peak_omega, gamma) * alpha_beta_spectrum(omega, peak_omega, alpha, 1.25, gravity))
# TMA spectrum: the JONSWAP spectrum attenuated by a depth-limited factor
# phi for finite water depth.
# https://dl.acm.org/doi/10.1145/2791261.2791267
@wp.func
def TMA_spectrum(omega: float,
                 gravity: float,
                 wind_speed: float,
                 fetch_km: float,
                 gamma: float,
                 waterdepth: float):
    # non-dimensional depth frequency, clamped to the [0, 2.2] validity range
    omegaH = omega * wp.sqrt(waterdepth/gravity)
    omegaH = wp.max(0.0, wp.min(2.2, omegaH))
    phi = 0.5 * omegaH * omegaH
    if omegaH > 1.0:
        phi = 1.0 - 0.5 * sqr(2.0 - omegaH)
    # (stray C-style trailing semicolon removed; no behavior change)
    return phi * jonswap_spectrum(omega, gravity, wind_speed, fetch_km, gamma)
#warp kernel definitions
# Builds the 1D wave-displacement profile for one timestep. Each thread x
# accumulates sin/cos displacement contributions sampled linearly across the
# TMA spectrum between wavelengths lambdaMin..lambdaMax, and cubically blends
# three shifted copies so the profile tiles seamlessly in space.
@wp.kernel
def update_profile(profile: wp.array(dtype=wp.vec3),
                   profile_res: int,
                   profile_data_num: int,
                   lambdaMin: float,
                   lambdaMax: float,
                   profile_extend: float,
                   time: float,
                   windspeed: float,
                   waterdepth: float
                   ):
    x = wp.tid()
    # fixed seed so the random phases are identical every frame
    randself = wp.rand_init(7)
    # sampling parameters
    omega0 = wp.sqrt(2.0 * 3.14159 * 9.80665 / lambdaMin)
    omega1 = wp.sqrt(2.0 * 3.14159 * 9.80665 / lambdaMax)
    omega_delta = wp.abs(omega1 - omega0) / float(profile_data_num)
    # we blend three displacements for seamless spatial profile tiling
    space_pos_1 = profile_extend * float(x) / float(profile_res)
    space_pos_2 = space_pos_1 + profile_extend
    space_pos_3 = space_pos_1 - profile_extend
    p1 = wp.vec2(0.0,0.0)
    p2 = wp.vec2(0.0,0.0)
    p3 = wp.vec2(0.0,0.0)
    for i in range(0, profile_data_num):
        omega = wp.abs(omega0 + (omega1 - omega0) * float(i) / float(profile_data_num)) # linear sampling of omega
        # deep-water dispersion relation: k = omega^2 / g
        k = omega * omega / 9.80665
        phase = -time * omega + wp.randf(randself) * 2.0 * 3.14159
        amplitude = float(10000.0) * wp.sqrt(wp.abs(2.0 * omega_delta * TMA_spectrum(omega, 9.80665, windspeed, 100.0, 3.3, waterdepth)))
        p1 = wp.vec2( p1[0] + amplitude * wp.sin(phase + space_pos_1 * k), p1[1] - amplitude * wp.cos(phase + space_pos_1 * k) )
        p2 = wp.vec2( p2[0] + amplitude * wp.sin(phase + space_pos_2 * k), p2[1] - amplitude * wp.cos(phase + space_pos_2 * k) )
        p3 = wp.vec2( p3[0] + amplitude * wp.sin(phase + space_pos_3 * k), p3[1] - amplitude * wp.cos(phase + space_pos_3 * k) )
    # cubic blending coefficients
    s = float(float(x) / float(profile_res))
    c1 = float(2.0 * s * s * s - 3.0 * s * s + 1.0)
    c2 = float(-2.0 * s * s * s + 3.0 * s * s)
    disp_out = wp.vec3( (p1[0] + c1 * p2[0] + c2 * p3[0]) / float(profile_data_num), (p1[1] + c1 * p2[1] + c2 * p3[1]) / float(profile_data_num), 0. )
    # NOTE(review): wp.store is a legacy Warp API; newer code writes
    # profile[x] = disp_out — confirm against the pinned warp version.
    wp.store(profile, x, disp_out)
# Displaces every input point by summing the precomputed 1D profile over 128
# wave directions, each weighted by a directionality falloff around the main
# `direction`. One thread per point; reads in_points, writes out_points.
@wp.kernel
def update_points(out_points: wp.array(dtype=wp.vec3),
                  in_points: wp.array(dtype=wp.vec3),
                  profile: wp.array(dtype=wp.vec3),
                  profile_res: int,
                  profile_extent: float,
                  amplitude: float,
                  directionality: float,
                  direction: float,
                  antiAlias: int,
                  camPosX: float,
                  camPosY: float,
                  camPosZ: float):
    tid = wp.tid()
    p_crd = in_points[tid]
    # swap Y and Z axes for the computation (undone on the final write below)
    p_crd = wp.vec3(p_crd[0], p_crd[2], p_crd[1])
    # same fixed seed as update_profile so per-direction phases line up
    randself = wp.rand_init(7)
    disp_x = float(0.)
    disp_y = float(0.)
    disp_z = float(0.)
    w_sum = float(0.)
    direction_count = (int)(128)
    for d in range(0, direction_count):
        r = float(d) * 2. * 3.14159265359 / float(direction_count) + 0.02
        dir_x = wp.cos(r)
        dir_y = wp.sin(r)
        # directional amplitude: cubic falloff with angular distance from
        # the main direction, flattened as directionality -> 0
        t = wp.abs( direction - r )
        if (t > 3.14159265359):
            t = 2.0 * 3.14159265359 - t
        t = pow(t, 1.2)
        dirAmp = (2.0 * t * t * t - 3.0 * t * t + 1.0) * 1.0 + (- 2.0 * t * t * t + 3.0 * t * t) * (1.0 - directionality)
        dirAmp = dirAmp / (1.0 + 10.0 * directionality)
        rand_phase = wp.randf(randself)
        # project the point onto this direction and sample the tiled profile
        # with linear interpolation between the two nearest entries
        x_crd = (p_crd[0] * dir_x + p_crd[2] * dir_y) / profile_extent + rand_phase
        pos_0 = int(wp.floor(x_crd * float(profile_res))) % profile_res
        if x_crd < 0.:
            pos_0 = pos_0 + profile_res - 1
        pos_1 = int(pos_0 + 1) % profile_res
        p_disp_0 = profile[pos_0]
        p_disp_1 = profile[pos_1]
        w = frac( x_crd * float(profile_res) )
        prof_height_x = dirAmp * float((1. - w) * p_disp_0[0] + w * p_disp_1[0])
        prof_height_y = dirAmp * float((1. - w) * p_disp_0[1] + w * p_disp_1[1])
        disp_x = disp_x + dir_x * prof_height_x
        disp_y = disp_y + prof_height_y
        disp_z = disp_z + dir_y * prof_height_x
        w_sum = w_sum + 1.
    # simple anti-aliasing: reduce amplitude with increasing distance to viewpoint
    if (antiAlias > 0):
        v1 = wp.normalize( wp.vec3( p_crd[0] - camPosX, max( 100.0, wp.abs(p_crd[1] - camPosY)), p_crd[2] - camPosZ) )
        amplitude *= wp.sqrt( wp.abs(v1[1]) )
    # write output vertex position (axes swapped back to the input layout)
    outP = wp.vec3(p_crd[0] + amplitude * disp_x / w_sum, p_crd[1] + amplitude * disp_y / w_sum, p_crd[2] + amplitude * disp_z / w_sum)
    wp.store(out_points, tid, wp.vec3(outP[0], outP[2], outP[1]))
class Example:
    """Ocean-surface deformer for one prim.

    Precomputes a 1D wave profile on the GPU each frame (update_profile) and
    applies it to a fixed copy of the input points (update_points); results
    land in self.points_out.
    """
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        # profile buffer initializations
        print('[Ocean deformer] Initializing profile buffer.')
        self.profile_extent = 410.0 #physical size of profile, should be around half the resolution
        self.profile_res = int(8192)
        self.profile_wavenum = int(1000)
        self.profile_CUDA = wp.zeros(self.profile_res, dtype=wp.vec3, device="cuda:0")
        # points_in keeps the undeformed rest positions; points_out is the
        # per-frame deformed copy
        self.points_in = wp.array(points, dtype=wp.vec3, device="cuda:0")
        self.points_out = wp.array(points, dtype=wp.vec3, device="cuda:0")
        print(self.points_in)
        print(self.points_out)
    def update(self, sim_time: float):
        global sim_params_global
        # params (module-level dict, replaced by the host via exec_sim)
        wave_amplitude = sim_params_global["wave_amplitude"]
        wave_directionality = sim_params_global["wave_directionality"]
        wind_speed = sim_params_global["wind_speed"]
        water_depth = sim_params_global["water_depth"]
        scale = sim_params_global["scale"]
        direction = sim_params_global["direction"]
        # Parameters — every user value is clamped to a safe working range
        time = float(sim_time)
        amplitude = max(0.0001, min(1000.0, float(wave_amplitude)))
        minWavelength = 0.1
        maxWavelength = 250.0
        direction = float(direction) % 6.28318530718
        directionality = max(0.0, min(1.0, 0.02 * float(wave_directionality)))
        windspeed = max(0.0, min(30.0, float(wind_speed)))
        waterdepth = max(1.0, min(1000.0, float(water_depth)))
        scale = min(10000.0, max(0.001, float(scale)))
        # anti-aliasing disabled: no camera position is piped in yet
        antiAlias = int(0)
        campos = [0.0, 0.0, 0.0]
        # create 1D profile buffer for this timestep using wave paramters stored in internal self CUDA memory
        wp.launch(
            kernel=update_profile,
            dim=self.profile_res,
            inputs=[self.profile_CUDA, int(self.profile_res), int(self.profile_wavenum), float(minWavelength), float(maxWavelength), float(self.profile_extent), float(time), float(windspeed), float(waterdepth)],
            outputs=[],
            device="cuda:0")
        # update point positions using the profile buffer created above
        wp.launch(
            kernel=update_points,
            dim=len(self.points_out),
            inputs=[self.points_out, self.points_in, self.profile_CUDA, int(self.profile_res), float(self.profile_extent*scale), float(amplitude), float(directionality), float(direction), int(antiAlias), float(campos[0]), float(campos[1]), float(campos[2]) ],
            outputs=[],
            device="cuda:0")
global_examples = {}
def terminate_sim(primPath: Sdf.Path):
    """Release the ocean deformer registered for *primPath*.

    Removes the registry entry outright instead of leaving a dangling None
    value behind, so the dict does not accumulate dead keys across sim
    restarts; a missing entry is silently ignored.
    """
    global_examples.pop(primPath, None)
def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Register an ocean deformer over the source mesh for *primPath*.

    A non-empty sim_params dict REPLACES the module-level defaults in
    sim_params_global, affecting every registered deformer.
    """
    global global_examples
    global sim_params_global
    if sim_params:
        sim_params_global = sim_params
    global_examples[primPath] = Example(src_indices, src_points)
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Advance the ocean deformer for *primPath* and return the new points.

    A non-empty sim_params dict replaces the shared sim_params_global before
    stepping (same global-overwrite behavior as initialize_sim_mesh).
    """
    global global_examples
    global sim_params_global
    if sim_params:
        sim_params_global = sim_params
    # Sim expects 60 samples per second (or hydra time of 1.0)
    global_examples[primPath].update(sim_dt / 60.0)
    return Vt.Vec3fArray.FromNumpy(global_examples[primPath].points_out.numpy())
| 11,029 | Python | 37.838028 | 260 | 0.580288 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/preferences.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr.Usdviewq.qt import QtCore, QtGui, QtWidgets
from .preferencesUI import Ui_Preferences
class Preferences(QtWidgets.QDialog):
    """Dialog for editing the ocean-sim customData stored on a warp attribute.

    Loads the current values into the spin boxes on construction and writes
    them back via SetMetadataByDictKey on OK/Apply.
    """

    # customData key -> name of the Ui_Preferences spin box that edits it.
    # Single source of truth for load (_init_) and store (_apply).
    _FIELDS = {
        "scale": "scaleSpinBox",
        "direction": "directionSpinBox",
        "wind_speed": "windSpeedSpinBox",
        "water_depth": "waterDepthSpinBox",
        "wave_amplitude": "waveAmplitudeSpinBox",
        "wave_directionality": "waveDirectionalitySpinBox",
    }

    def __init__(self, parent, attr):
        """parent: Qt parent widget; attr: the UsdAttribute carrying customData."""
        super(Preferences, self).__init__(parent)
        self._ui = Ui_Preferences()
        self._ui.setupUi(self)
        self._attr = attr
        # GetMetadata returns a falsy value when no customData is authored;
        # missing keys fall back to the spin box's designer default instead
        # of raising KeyError.
        metadata = self._attr.GetMetadata("customData") or {}
        for key, widgetName in self._FIELDS.items():
            spinBox = getattr(self._ui, widgetName)
            spinBox.setValue(metadata.get(key, spinBox.value()))
        self._ui.buttonBox.clicked.connect(self._buttonBoxButtonClicked)

    def _apply(self):
        """Write every spin-box value back into the attribute's customData."""
        for key, widgetName in self._FIELDS.items():
            self._attr.SetMetadataByDictKey(
                'customData', key, getattr(self._ui, widgetName).value())

    def _buttonBoxButtonClicked(self, button):
        # Apply on OK/Apply; close on OK/Cancel.
        role = self._ui.buttonBox.buttonRole(button)
        Roles = QtWidgets.QDialogButtonBox.ButtonRole
        if role == Roles.AcceptRole or role == Roles.ApplyRole:
            self._apply()
        if role == Roles.AcceptRole or role == Roles.RejectRole:
            self.close()
| 2,923 | Python | 46.16129 | 120 | 0.718782 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/__init__.py | from pxr import Tf
from pxr.Usdviewq.plugin import PluginContainer
from .preferences import Preferences
def launchPreferences(usdviewApi):
    """Open the Ocean Simulation Settings dialog for the sample grid prim.

    NOTE(review): the prim path is hard-coded to the sample stage layout
    (/World/grid/Grid) — confirm it matches the stage being viewed.
    """
    prim = usdviewApi.stage.GetPrimAtPath("/World/grid/Grid")
    attr = prim.GetAttribute("warp:sourceFile")
    dlg = Preferences(usdviewApi.qMainWindow, attr)
    # The Qt parent (qMainWindow) keeps the dialog alive after this function
    # returns, so no module-level reference is needed. (The old
    # `_preferencesDlg = None` rebinding of the local was a no-op and has
    # been removed.)
    dlg.show()
class OceanSimPluginContainer(PluginContainer):
    """usdview plugin container adding an 'OceanSim' menu.

    Registered with Tf below; usdview discovers it through the plugin
    registry and calls registerPlugins/configureView on startup.
    """
    def registerPlugins(self, plugRegistry, usdviewApi):
        # Register the command that opens the preferences dialog.
        self._launchPreferences = plugRegistry.registerCommandPlugin(
            "OceanSimPluginContainer.launchPreferences",
            "Launch Preferences",
            launchPreferences)
    def configureView(self, plugRegistry, plugUIBuilder):
        # Expose the command under a dedicated "OceanSim" menu.
        tutMenu = plugUIBuilder.findOrCreateMenu("OceanSim")
        tutMenu.addItem(self._launchPreferences)
Tf.Type.Define(OceanSimPluginContainer) | 878 | Python | 32.807691 | 69 | 0.749431 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/preferencesUI_pyside6.py | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'preferencesUI.ui'
##
## Created by: Qt User Interface Compiler version 6.5.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor,
QFont, QFontDatabase, QGradient, QIcon,
QImage, QKeySequence, QLinearGradient, QPainter,
QPalette, QPixmap, QRadialGradient, QTransform)
from PySide6.QtWidgets import (QAbstractButton, QApplication, QDialog, QDialogButtonBox,
QDoubleSpinBox, QFrame, QHBoxLayout, QLabel,
QSizePolicy, QSpacerItem, QVBoxLayout, QWidget)
class Ui_Preferences(object):
def setupUi(self, Ocean_Simulation_Settings):
if not Ocean_Simulation_Settings.objectName():
Ocean_Simulation_Settings.setObjectName(u"Ocean_Simulation_Settings")
Ocean_Simulation_Settings.resize(295, 99)
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName(u"verticalLayout")
self.prefsOverButtonsLayout = QVBoxLayout()
self.prefsOverButtonsLayout.setObjectName(u"prefsOverButtonsLayout")
self.horizontalLayout_3 = QHBoxLayout()
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.scaleLabel = QLabel()
self.scaleLabel.setObjectName(u"scaleLabel")
self.horizontalLayout_3.addWidget(self.scaleLabel)
self.horizontalSpacer_2a = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(self.horizontalSpacer_2a)
self.scaleSpinBox = QDoubleSpinBox()
self.scaleSpinBox.setObjectName(u"scaleSpinBox")
self.scaleSpinBox.setDecimals(2)
self.scaleSpinBox.setMinimum(0.000000000000000)
self.scaleSpinBox.setValue(1.000000000000000)
self.horizontalLayout_3.addWidget(self.scaleSpinBox)
self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QHBoxLayout()
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.directionLabel = QLabel()
self.directionLabel.setObjectName(u"directionLabel")
self.horizontalLayout_4.addWidget(self.directionLabel)
self.horizontalSpacer_2b = QSpacerItem(26, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(self.horizontalSpacer_2b)
self.directionSpinBox = QDoubleSpinBox()
self.directionSpinBox.setObjectName(u"directionSpinBox")
self.directionSpinBox.setDecimals(2)
self.directionSpinBox.setMinimum(0.000000000000000)
self.directionSpinBox.setValue(0.000000000000000)
self.horizontalLayout_4.addWidget(self.directionSpinBox)
self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QHBoxLayout()
self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
self.windSpeedLabel = QLabel()
self.windSpeedLabel.setObjectName(u"windSpeedLabel")
self.horizontalLayout_5.addWidget(self.windSpeedLabel)
self.horizontalSpacer_2c = QSpacerItem(24, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(self.horizontalSpacer_2c)
self.windSpeedSpinBox = QDoubleSpinBox()
self.windSpeedSpinBox.setObjectName(u"windSpeedSpinBox")
self.windSpeedSpinBox.setDecimals(2)
self.windSpeedSpinBox.setMinimum(0.000000000000000)
self.windSpeedSpinBox.setValue(10.000000000000000)
self.horizontalLayout_5.addWidget(self.windSpeedSpinBox)
self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QHBoxLayout()
self.horizontalLayout_6.setObjectName(u"horizontalLayout_6")
self.waterDepthLabel = QLabel()
self.waterDepthLabel.setObjectName(u"waterDepthLabel")
self.horizontalLayout_6.addWidget(self.waterDepthLabel)
self.horizontalSpacer_2d = QSpacerItem(24, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(self.horizontalSpacer_2d)
self.waterDepthSpinBox = QDoubleSpinBox()
self.waterDepthSpinBox.setObjectName(u"waterDepthSpinBox")
self.waterDepthSpinBox.setDecimals(2)
self.waterDepthSpinBox.setMinimum(0.000000000000000)
self.waterDepthSpinBox.setValue(50.000000000000000)
self.horizontalLayout_6.addWidget(self.waterDepthSpinBox)
self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QHBoxLayout()
self.horizontalLayout_7.setObjectName(u"horizontalLayout_7")
self.waveAmplitudeLabel = QLabel()
self.waveAmplitudeLabel.setObjectName(u"waveAmplitudeLabel")
self.horizontalLayout_7.addWidget(self.waveAmplitudeLabel)
self.horizontalSpacer_2e = QSpacerItem(21, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(self.horizontalSpacer_2e)
self.waveAmplitudeSpinBox = QDoubleSpinBox()
self.waveAmplitudeSpinBox.setObjectName(u"waveAmplitudeSpinBox")
self.waveAmplitudeSpinBox.setDecimals(2)
self.waveAmplitudeSpinBox.setMinimum(0.000000000000000)
self.waveAmplitudeSpinBox.setValue(1.500000000000000)
self.horizontalLayout_7.addWidget(self.waveAmplitudeSpinBox)
self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_7)
self.horizontalLayout_8 = QHBoxLayout()
self.horizontalLayout_8.setObjectName(u"horizontalLayout_8")
self.waveDirectionalityLabel = QLabel()
self.waveDirectionalityLabel.setObjectName(u"waveDirectionalityLabel")
self.horizontalLayout_8.addWidget(self.waveDirectionalityLabel)
self.horizontalSpacer_2f = QSpacerItem(17, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(self.horizontalSpacer_2f)
self.waveDirectionalitySpinBox = QDoubleSpinBox()
self.waveDirectionalitySpinBox.setObjectName(u"waveDirectionalitySpinBox")
self.waveDirectionalitySpinBox.setMinimum(0.000000000000000)
self.waveDirectionalitySpinBox.setValue(0.000000000000000)
self.horizontalLayout_8.addWidget(self.waveDirectionalitySpinBox)
self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_8)
self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.prefsOverButtonsLayout.addItem(self.verticalSpacer)
self.line = QFrame()
self.line.setObjectName(u"line")
self.line.setFrameShape(QFrame.HLine)
self.line.setFrameShadow(QFrame.Sunken)
self.prefsOverButtonsLayout.addWidget(self.line)
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(self.horizontalSpacer)
self.buttonBox = QDialogButtonBox()
self.buttonBox.setObjectName(u"buttonBox")
self.buttonBox.setStandardButtons(QDialogButtonBox.Apply|QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
self.horizontalLayout_2.addWidget(self.buttonBox)
self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_2)
self.verticalLayout.addLayout(self.prefsOverButtonsLayout)
self.retranslateUi(Ocean_Simulation_Settings)
QMetaObject.connectSlotsByName(Ocean_Simulation_Settings)
# setupUi
def retranslateUi(self, Ocean_Simulation_Settings):
    """Install (re)translated UI strings on the settings dialog.

    NOTE(review): this method is machine-generated Qt Designer/uic output
    (pyside uic); do not hand-edit the string literals — regenerate from
    the .ui file instead. The "comment" property below embeds the Pixar
    Apache-2.0 license text verbatim from the source .ui file.
    """
    Ocean_Simulation_Settings.setWindowTitle(QCoreApplication.translate("Preferences", u"Ocean Simulation Settings", None))
    # License text carried as a window property; split string literals are
    # implicit concatenation and must stay byte-identical.
    Ocean_Simulation_Settings.setProperty("comment", QCoreApplication.translate("Preferences", u"\n"
"                                      Copyright 2020 Pixar                                       \n"
"                                                                                                \n"
"   Licensed under the Apache License, Version 2.0 (the \"Apache License\")                      \n"
"   with the following modification; you may not use this file except in                         \n"
"   compliance with the Apache License and the following modification to it:                     \n"
"   Section 6. Trademarks. is deleted and replaced with:                                         \n"
"                                                                                                \n"
"   6. Trademarks. This License does not grant permission to use the trade                       \n"
"      names, trademarks, service marks, or product names of the Licensor                        \n"
"      and its affiliates, except as required to comply with Section 4(c) of                     \n"
"      the License and to reproduce the content of the NOTI"
                        "CE file.                                                                \n"
"                                                                                                \n"
"   You may obtain a copy of the Apache License at                                               \n"
"                                                                                                \n"
"       http://www.apache.org/licenses/LICENSE-2.0                                               \n"
"                                                                                                \n"
"   Unless required by applicable law or agreed to in writing, software                          \n"
"   distributed under the Apache License with the above modification is                          \n"
"   distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY                   \n"
"   KIND, either express or implied. See the Apache License for the specific                     \n"
"   language governing permissions and limitations under the Apache License.                     \n"
"                                                                                               ", None))
    # Field labels for the ocean simulation parameters.
    self.scaleLabel.setText(QCoreApplication.translate("Preferences", u"Scale", None))
    self.directionLabel.setText(QCoreApplication.translate("Preferences", u"Direction", None))
    self.windSpeedLabel.setText(QCoreApplication.translate("Preferences", u"Wind Speed", None))
    self.waterDepthLabel.setText(QCoreApplication.translate("Preferences", u"Water Depth", None))
    self.waveAmplitudeLabel.setText(QCoreApplication.translate("Preferences", u"Wave Amplitude", None))
    self.waveDirectionalityLabel.setText(QCoreApplication.translate("Preferences", u"Wave Directionality", None))
# retranslateUi
| 10,887 | Python | 46.134199 | 127 | 0.669055 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedPrimDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/gf/transform.h>
#include <pxr/usd/usdGeom/tokens.h>
#include <pxr/imaging/hd/xformSchema.h>
#include "computedPrimDataSource.h"
#include "localPositionSchema.h"
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Wraps a geospatially-applied prim's container data source. The xform
// matrix it exposes is synthesized on demand by the nested
// _GeospatialMatrixDataSource rather than read from the underlying
// time-sampled data.
HdOmniGeospatialComputedPrimDataSource::HdOmniGeospatialComputedPrimDataSource(
    HdContainerDataSourceHandle inputDataSource) :
    _inputDataSource(inputDataSource)
{
    // Built once up front; the matrix source recomputes per time sample.
    _matrixDataSource =
        HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::New(_inputDataSource);
}

#if PXR_VERSION < 2302
// Pre-23.02 Hydra still requires Has(); this source only answers for the
// two HdXformSchema fields it synthesizes.
bool HdOmniGeospatialComputedPrimDataSource::Has(const TfToken& name)
{
    return (name == HdXformSchemaTokens->resetXformStack) ||
        (name == HdXformSchemaTokens->matrix);
}
#endif
// This prim data source mimics an xform container: it publishes exactly
// the resetXformStack and matrix fields of HdXformSchema.
TfTokenVector HdOmniGeospatialComputedPrimDataSource::GetNames()
{
    return TfTokenVector {
        HdXformSchemaTokens->resetXformStack,
        HdXformSchemaTokens->matrix
    };
}
// Serves the synthesized xform container: resetXformStack is passed
// through unchanged from the wrapped prim, while matrix is answered by
// the computed geospatial matrix source.
HdDataSourceBaseHandle HdOmniGeospatialComputedPrimDataSource::Get(const TfToken& name)
{
    if (_inputDataSource != nullptr)
    {
        if (name == HdXformSchemaTokens->resetXformStack)
        {
            // we don't modify the underlying time-sampled data
            // for resetXformStack, so return that directly
            HdXformSchema xformSchema = HdXformSchema::GetFromParent(_inputDataSource);
            return xformSchema.IsDefined() ? xformSchema.GetResetXformStack() : nullptr;
        }
        else if (name == HdXformSchemaTokens->matrix)
        {
            // note even if resetXformStack was true we consider
            // the geospatial data to override that
            return _matrixDataSource;
        }
    }
    return nullptr;
}

// Captures the wrapped prim's container; all schema lookups below resolve
// lazily against it.
HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GeospatialMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource) : _inputDataSource(inputDataSource)
{
}

// Untyped sampled-value entry point; defers to GetTypedValue.
VtValue HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}

// The matrix is recomputed per sample rather than cached, since both the
// base matrix and the local position may be time sampled.
GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return this->_ComputeTransformedMatrix(shutterOffset);
}

// Sample times are the merged set from the two time-varying inputs:
// the underlying matrix and the geodetic local position.
bool HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle sources[] = {
        this->_GetMatrixSource(),
        this->_GetLocalPositionSource()
    };
    return HdGetMergedContributingSampleTimesForInterval(
        TfArraySize(sources),
        sources,
        startTime,
        endTime,
        outSampleTimes);
}

// --- raw data-source accessors -----------------------------------------
// Each resolves the relevant schema from the wrapped prim's container;
// any of these may return null when the attribute is not authored.

HdMatrixDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetMatrixSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetMatrix();
}

HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetLocalPositionSource() const
{
    return HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(_inputDataSource).GetPosition();
}

HdTokenDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetTangentPlaneSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetTangentPlane();
}

HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetReferencePositionSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetReferencePosition();
}

HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetOrientationSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetOrientation();
}

HdTokenDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageUpAxisSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetStageUpAxis();
}

HdDoubleDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageMetersPerUnitSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetStageMetersPerUnit();
}
GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetMatrix(const Time shutterOffset) const
{
HdMatrixDataSourceHandle dataSource = this->_GetMatrixSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(shutterOffset);
}
return GfMatrix4d(1.0);
}
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetLocalPosition(const Time shutterOffset) const
{
HdVec3dDataSourceHandle dataSource = this->_GetLocalPositionSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(shutterOffset);
}
return GfVec3d(1.0);
}
TfToken HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetTangentPlane() const
{
HdTokenDataSourceHandle dataSource = this->_GetTangentPlaneSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(0.0f);
}
return TfToken();
}
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetReferencePosition() const
{
HdVec3dDataSourceHandle dataSource = this->_GetReferencePositionSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(0.0f);
}
return GfVec3d(1.0);
}
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetOrientation() const
{
HdVec3dDataSourceHandle dataSource = this->_GetOrientationSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(0.0f);
}
return GfVec3d(1.0);
}
TfToken HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageUpAxis() const
{
HdTokenDataSourceHandle dataSource = this->_GetStageUpAxisSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(0.0f);
}
return UsdGeomTokens->y;
}
double HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageMetersPerUnit() const
{
HdDoubleDataSourceHandle dataSource = this->_GetStageMetersPerUnitSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(0.0f);
}
return 0.01;
}
// Computes the final matrix for one time sample: converts the prim's
// geodetic (WGS84) local position into a stage-space translation and
// splices that translation into the prim's existing transform, keeping
// the original scale / rotation / pivot intact.
GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_ComputeTransformedMatrix(const Time shutterOffset) const
{
    // NOTE: in the case of the geospatially applied prim, we are completely
    // ignoring the fact that resetXformStack may be true at any given time sample
    // that is, geospatial positioning takes priority over local transformation reset
    // to compute the local position, we need to first get the geodetic reference
    TfToken targetFrame = this->_GetTangentPlane();
    GfVec3d tangentPosition = this->_GetReferencePosition();
    GfVec3d orientation = this->_GetOrientation();
    GfVec3d localPosition = this->_GetLocalPosition(shutterOffset);
    double metersPerUnit = this->_GetStageMetersPerUnit();
    TfToken upAxis = this->_GetStageUpAxis();
    // calculate the new geodetic translation
    // NOTE(review): targetFrame and orientation are fetched but not used in
    // this computation — presumably only the default tangent-plane
    // convention is supported so far; confirm intent.
    auto enu = this->_EcefToEnu(this->_GeodeticToEcef(localPosition), tangentPosition);
    GfVec3d translation = this->_EnuToCartesian(enu, upAxis, metersPerUnit, tangentPosition);
    // we only want to replace the translation piece
    // but since the transform may have orientation and scale
    // information, we need to extract that from the existing
    // matrix first
    GfTransform currentTransform(this->_GetMatrix(shutterOffset));
    GfVec3d existingScale = currentTransform.GetScale();
    GfRotation existingRotation = currentTransform.GetRotation();
    GfRotation existingPivotOrientation = currentTransform.GetPivotOrientation();
    GfVec3d existingPivotPosition = currentTransform.GetPivotPosition();
    // now combine the new translation with the existing scale / rotation
    GfTransform newTransform(existingScale, existingPivotOrientation,
        existingRotation, existingPivotPosition, translation);
    return newTransform.GetMatrix();
}

// Geospatial transform functions
// For reference:
// https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470099728.app3
// https://en.wikipedia.org/wiki/Geographic_coordinate_conversion
// Implementation of Ferrari's solution
//
// Converts a geodetic coordinate to earth-centered earth-fixed (ECEF).
// NOTE(review): the local `lambda` (built from llh[0]) drives the
// prime-vertical radius N, which in the standard formulation depends on
// *latitude* — so llh appears to be (latitude, longitude, height) with
// angles in degrees, and GeoConstants::eccentricity appears to store the
// squared eccentricity e^2. Confirm against GeoConstants' definition.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GeodeticToEcef(const GfVec3d & llh) const
{
    double lambda = llh[0] * GeoConstants::radians;
    double phi = llh[1] * GeoConstants::radians;
    double sin_lambda = sin(lambda);
    double N = GeoConstants::semiMajorAxis / sqrt(1 - GeoConstants::eccentricity * sin_lambda * sin_lambda);
    double cos_lambda = cos(lambda);
    double cos_phi = cos(phi);
    double sin_phi = sin(phi);
    return PXR_NS::GfVec3d((llh[2] + N) * cos_lambda * cos_phi, (llh[2] + N) * cos_lambda * sin_phi,
        (llh[2] + (1 - GeoConstants::eccentricity) * N) * sin_lambda);
}

// Converts an ECEF coordinate to a local east/north/up (ENU) offset
// relative to the geodetic reference point llh: the reference is first
// re-projected to ECEF (same formulation as _GeodeticToEcef), then the
// delta is rotated into the local tangent frame.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const
{
    double lambda = llh[0] * GeoConstants::radians;
    double phi = llh[1] * GeoConstants::radians;
    double sin_lambda = sin(lambda);
    double N = GeoConstants::semiMajorAxis / sqrt(1 - GeoConstants::eccentricity * sin_lambda * sin_lambda);
    double cos_lambda = cos(lambda);
    double cos_phi = cos(phi);
    double sin_phi = sin(phi);
    PXR_NS::GfVec3d pt((llh[2] + N) * cos_lambda * cos_phi,
        (llh[2] + N) * cos_lambda * sin_phi,
        (llh[2] + (1 - GeoConstants::eccentricity) * N) * sin_lambda);
    auto delta = ecef - pt;
    return PXR_NS::GfVec3d(-sin_phi * delta[0] + cos_phi * delta[1],
        -cos_phi * sin_lambda * delta[0] - sin_lambda * sin_phi * delta[1] + cos_lambda * delta[2],
        cos_lambda * cos_phi * delta[0] + cos_lambda * sin_phi * delta[1] + sin_lambda * delta[2]);
}

// Maps an ENU offset into stage coordinates: chooses which ENU component
// lands on the stage's up axis and rescales from meters to stage units.
// NOTE(review): the first component's sign flip when reference[0] < 0
// (southern-hemisphere reference, if llh[0] is latitude) is unusual —
// confirm it matches the intended tangent-plane convention.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_EnuToCartesian(
    const GfVec3d& enu,
    const TfToken& upAxis,
    const double& metersPerUnit,
    const GfVec3d& reference) const
{
    auto cartesian = GfVec3d(reference[0] < 0.0 ? -enu[0] : enu[0],
        upAxis == UsdGeomTokens->y ? enu[2] : enu[1],
        upAxis == UsdGeomTokens->z ? enu[2] : enu[1]);
    cartesian /= metersPerUnit;
    return cartesian;
}
PXR_NAMESPACE_CLOSE_SCOPE | 11,354 | C++ | 35.394231 | 137 | 0.747314 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_
#define HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_
#include <pxr/imaging/hd/schema.h>
#include <pxr/imaging/hd/dataSourceLocator.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
// Token set for the WGS84 reference position schema: the applied API name
// plus the five data fields it carries. (Blank line after the macro is
// required — the final continuation backslash must not swallow the
// TF_DECLARE_PUBLIC_TOKENS statement.)
#define HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS \
    (referencePositionApi) \
    (tangentPlane) \
    (referencePosition) \
    (orientation) \
    (stageUpAxis) \
    (stageMetersPerUnit) \

TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialWGS84ReferencePositionSchemaTokens, OMNIGEOSCENEINDEX_API,
    HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniGeospatialWGS84ReferencePositionSchema
///
/// Schema-style view over the WGS84 reference-position data carried on a
/// prim's container data source: the tangent plane, geodetic reference
/// position, orientation, and the stage metrics (up axis, meters per
/// unit) needed to map geodetic coordinates into stage space.
class HdOmniGeospatialWGS84ReferencePositionSchema : public HdSchema
{
public:
    HdOmniGeospatialWGS84ReferencePositionSchema(HdContainerDataSourceHandle container)
    : HdSchema(container) { }
    // Accessors for the individual schema fields; each may return a null
    // handle when the corresponding attribute is not present.
    OMNIGEOSCENEINDEX_API
    HdTokenDataSourceHandle GetTangentPlane();
    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetReferencePosition();
    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetOrientation();
    OMNIGEOSCENEINDEX_API
    HdTokenDataSourceHandle GetStageUpAxis();
    OMNIGEOSCENEINDEX_API
    HdDoubleDataSourceHandle GetStageMetersPerUnit();
    // Resolves the schema container from a parent prim-level container.
    OMNIGEOSCENEINDEX_API
    static HdOmniGeospatialWGS84ReferencePositionSchema GetFromParent(
        const HdContainerDataSourceHandle& fromParentContainer);
    // Locator identifying where this schema lives in a prim container.
    OMNIGEOSCENEINDEX_API
    static const HdDataSourceLocator& GetDefaultLocator();
    // Builds a retained (static) container holding the given field values.
    OMNIGEOSCENEINDEX_API
    static HdContainerDataSourceHandle BuildRetained(
        const HdTokenDataSourceHandle& tangentPlane,
        const HdVec3dDataSourceHandle& referencePosition,
        const HdVec3dDataSourceHandle& orientation,
        const HdTokenDataSourceHandle& stageUpAxis,
        const HdDoubleDataSourceHandle& stageMetersPerUnit
    );
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_ | 2,662 | C | 32.70886 | 99 | 0.730278 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/api.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GEO_SCENE_INDEX_API_H
#define OMNI_GEO_SCENE_INDEX_API_H

#include "pxr/base/arch/export.h"

// Standard pxr-style symbol export/import scaffolding: when building the
// library (OMNIGEOSCENEINDEX_EXPORTS defined) symbols are exported; when
// consuming it they are imported; static builds need neither.
#if defined(PXR_STATIC)
#   define OMNIGEOSCENEINDEX_API
#   define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...)
#   define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...)
#   define OMNIGEOSCENEINDEX_LOCAL
#else
#   if defined(OMNIGEOSCENEINDEX_EXPORTS)
#       define OMNIGEOSCENEINDEX_API ARCH_EXPORT
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
#   else
#       define OMNIGEOSCENEINDEX_API ARCH_IMPORT
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
#   endif
#   define OMNIGEOSCENEINDEX_LOCAL ARCH_HIDDEN
#endif

#endif // OMNI_GEO_SCENE_INDEX_API_H
| 1,544 | C | 39.657894 | 99 | 0.734456 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/usdImaging/usdImaging/dataSourceStageGlobals.h>
#include <omniGeospatial/wGS84LocalPositionAPI.h>
#include "localPositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
/// \class HdOmniGeospatialWGS84LocalPositionDataSource
///
/// Container data source that exposes a USD prim's applied
/// OmniGeospatialWGS84LocalPositionAPI position attribute to Hydra under
/// the local-position schema token.
class HdOmniGeospatialWGS84LocalPositionDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialWGS84LocalPositionDataSource);
    HdOmniGeospatialWGS84LocalPositionDataSource(const UsdPrim& prim,
        const UsdImagingDataSourceStageGlobals& stageGlobals);
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif
private:
    // Applied-API accessor bound at construction.
    OmniGeospatialWGS84LocalPositionAPI _localPositionApi;
    // Stage globals used to resolve time-sampled attribute values.
    const UsdImagingDataSourceStageGlobals& _stageGlobals;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_ | 1,710 | C | 33.219999 | 81 | 0.792398 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedDependentDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
PXR_NAMESPACE_OPEN_SCOPE
/// \class HdOmniGeospatialComputedDependentDataSource
///
/// A datasource representing a container data source mimicing
/// that of a container data source for xform data, but returning
/// computed values based on geospatial data applied to the parent
/// (or some parent in the hierarchy) of this prim.
///
class HdOmniGeospatialComputedDependentDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialComputedDependentDataSource);
    // inputDataSource: the affected prim's own container;
    // parentDataSource: the container of the (geospatially influenced)
    // parent prim used to re-derive this prim's flattened transform.
    HdOmniGeospatialComputedDependentDataSource(HdContainerDataSourceHandle inputDataSource,
        HdContainerDataSourceHandle parentDataSource);
    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif
private:
    HdDataSourceBaseHandle _ComputeGeospatiallyAffectedXform();
private:
    HdContainerDataSourceHandle _inputDataSource;
    HdContainerDataSourceHandle _parentDataSource;
    HdMatrixDataSourceHandle _matrixDataSource;
    // Sampled matrix source that recomputes this prim's transform from its
    // own data and the parent's geospatially-modified transform.
    class _GeospatiallyAffectedMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_GeospatiallyAffectedMatrixDataSource);
        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;
    private:
        _GeospatiallyAffectedMatrixDataSource(HdContainerDataSourceHandle inputDataSource,
            HdContainerDataSourceHandle parentDataSource);
        // Raw data-source accessors; may return null handles.
        HdMatrixDataSourceHandle _GetMatrixSource() const;
        HdBoolDataSourceHandle _GetResetXformStackSource() const;
        HdMatrixDataSourceHandle _GetParentMatrixSource() const;
        HdMatrixDataSourceHandle _GetParentOriginalMatrixSource() const;
        // Typed getters with defaults applied.
        GfMatrix4d _GetMatrix(const Time shutterOffset) const;
        bool _GetResetXformStack(const Time shutterOffset) const;
        GfMatrix4d _GetParentMatrix(const Time shutterOffset) const;
        GfMatrix4d _GetParentOriginalMatrix(const Time shutterOffset) const;
        // geospatial transform methods
        GfMatrix4d _ComputeTransformedMatrix(const Time shutterOffset) const;
        HdContainerDataSourceHandle _inputDataSource;
        HdContainerDataSourceHandle _parentDataSource;
    };
    HD_DECLARE_DATASOURCE_HANDLES(_GeospatiallyAffectedMatrixDataSource);
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_ | 3,530 | C | 35.402061 | 92 | 0.768272 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/xformSchema.h>
#include "geospatialDataSource.h"
#include "computedPrimDataSource.h"
#include "computedDependentDataSource.h"
#include "localPositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialDataSourceTokens,
    HDOMNIGEOSPATIALDATASOURCE_TOKENS);

// Wraps an Xformable prim's container data source so that xform queries
// can be intercepted and answered with geospatially-computed transforms.
HdOmniGeospatialDataSource::HdOmniGeospatialDataSource(const HdSceneIndexBase& index, const SdfPath& primPath,
    HdContainerDataSourceHandle wrappedDataSource) :
    _sceneIndex(index),
    _primPath(primPath),
    _wrappedDataSource(wrappedDataSource)
{
}

// Swaps the underlying data source (e.g. when the scene index refreshes
// the prim); cached computed sources are invalidated via IsPrimDirtied.
void HdOmniGeospatialDataSource::UpdateWrappedDataSource(
    HdContainerDataSourceHandle wrappedDataSource)
{
    _wrappedDataSource = wrappedDataSource;
}

#if PXR_VERSION < 2302
// Pre-23.02 Hydra Has(): always advertises the preserved-xform token,
// otherwise defers to the wrapped data source.
bool HdOmniGeospatialDataSource::Has(const TfToken& name)
{
    if (name == HdOmniGeospatialDataSourceTokens->geospatialPreservedXform)
    {
        return true;
    }
    return (_wrappedDataSource != nullptr) ? _wrappedDataSource->Has(name) : false;
}
#endif
// Names are the wrapped prim's names (which, for Xformables, include
// HdXformSchemaTokens->xform) plus the preserved-xform token we add.
TfTokenVector HdOmniGeospatialDataSource::GetNames()
{
    TfTokenVector names;
    if (_wrappedDataSource != nullptr)
    {
        names = _wrappedDataSource->GetNames();
    }
    names.push_back(HdOmniGeospatialDataSourceTokens->geospatialPreservedXform);
    return names;
}
// Intercepts xform queries to serve geospatially-computed matrices; the
// original flattened transform remains available under the
// geospatialPreservedXform token. Everything else defers to the wrapped
// data source.
HdDataSourceBaseHandle HdOmniGeospatialDataSource::Get(const TfToken& name)
{
    if (name == HdXformSchemaTokens->xform)
    {
        // this is an intercept of the flattened transform matrix
        // we need to dynamically compute a geospatial one
        return this->_ComputeGeospatialXform();
    }
    else if (name == HdOmniGeospatialDataSourceTokens->geospatialPreservedXform)
    {
        // this would be the original flattened matrix of the wrapped data source
        if (_wrappedDataSource != nullptr)
        {
            return _wrappedDataSource->Get(HdXformSchemaTokens->xform);
        }
    }
    // all other token values should be defer to the wrapped data source (if any)
    if (_wrappedDataSource != nullptr)
    {
        return _wrappedDataSource->Get(name);
    }
    return nullptr;
}

// Drops the cached computed data sources when the prim's xform is
// dirtied; returns true if anything was actually invalidated.
bool HdOmniGeospatialDataSource::IsPrimDirtied(const HdDataSourceLocatorSet& locators)
{
    static const HdContainerDataSourceHandle containerNull(nullptr);
    if (locators.Intersects(HdXformSchema::GetDefaultLocator()))
    {
        if (HdContainerDataSource::AtomicLoad(_computedGeospatialPrimDataSource) != nullptr ||
            HdContainerDataSource::AtomicLoad(_computedGeospatialDependentDataSource) != nullptr)
        {
            HdContainerDataSource::AtomicStore(_computedGeospatialPrimDataSource, containerNull);
            HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, containerNull);
            return true;
        }
    }
    return false;
}
// Builds (and caches) the data source that serves this prim's xform.
//
// Since matrices are time sampled, nothing is evaluated here; we only set
// up the right HdMatrixDataSource to compute a final value at a specific
// time sample when asked. Two cases:
//  1. The wrapped prim itself carries a local geodetic position: all the
//     information needed lives on the prim (tangent frame + geodetic
//     position from the applied API schema).
//  2. The prim is not geospatially applied but some ancestor is: the
//     prim's transform must be recomputed from its own data plus its
//     immediate parent's (geospatially modified) transform.
// The ancestor scan in case 2 walks to the root on every cache miss;
// caching the result per prim (with proper invalidation) is a TODO.
//
// Returns nullptr when no transform can be served.
HdDataSourceBaseHandle HdOmniGeospatialDataSource::_ComputeGeospatialXform()
{
    if (this->_HasGeospatialInformation(_wrappedDataSource))
    {
        // Case 1: serve the cached computed source if present...
        HdContainerDataSourceHandle computedGeospatialPrimDataSource =
            HdContainerDataSource::AtomicLoad(_computedGeospatialPrimDataSource);
        if (computedGeospatialPrimDataSource != nullptr)
        {
            return computedGeospatialPrimDataSource;
        }
        // ...otherwise build one. It needs the full wrapped container since
        // both resetXformStack and matrix can be time sampled.
        computedGeospatialPrimDataSource = HdOmniGeospatialComputedPrimDataSource::New(_wrappedDataSource);
        HdContainerDataSource::AtomicStore(_computedGeospatialPrimDataSource, computedGeospatialPrimDataSource);
        return computedGeospatialPrimDataSource;
    }
    // Case 2: serve the cached dependent source if present.
    HdContainerDataSourceHandle computedGeospatialDependentDataSource =
        HdContainerDataSource::AtomicLoad(_computedGeospatialDependentDataSource);
    if (computedGeospatialDependentDataSource != nullptr)
    {
        return computedGeospatialDependentDataSource;
    }
    if (_primPath == SdfPath::AbsoluteRootPath())
    {
        // The root itself; assumed never geospatially applied, so pass the
        // wrapped xform through unchanged.
        return (_wrappedDataSource != nullptr)
            ? _wrappedDataSource->Get(HdXformSchemaTokens->xform)
            : nullptr;
    }
    // Walk up until a geospatially applied ancestor is found. Only the
    // existence of such an ancestor matters here (the dependent compute
    // below always uses the immediate parent), so stop at the first hit
    // instead of scanning the whole hierarchy.
    HdContainerDataSourceHandle geospatialDataSource = nullptr;
    for (SdfPath p = _primPath.GetParentPath(); p != SdfPath::AbsoluteRootPath(); p = p.GetParentPath())
    {
        HdSceneIndexPrim prim = _sceneIndex.GetPrim(p);
        if (this->_HasGeospatialInformation(prim.dataSource))
        {
            geospatialDataSource = prim.dataSource;
            break;
        }
    }
    if (geospatialDataSource == nullptr)
    {
        // No geospatial ancestor: cache and serve the wrapped xform as-is.
        if (_wrappedDataSource != nullptr)
        {
            HdContainerDataSourceHandle dataSource =
                HdContainerDataSource::Cast(_wrappedDataSource->Get(HdXformSchemaTokens->xform));
            if (dataSource != nullptr)
            {
                HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, dataSource);
                return dataSource;
            }
        }
        return nullptr;
    }
    // A geospatial ancestor exists: build a data source that recomputes
    // this prim's transform from its own data and its immediate parent's.
    SdfPath parentPath = _primPath.GetParentPath();
    HdSceneIndexPrim parentSceneIndexPrim = _sceneIndex.GetPrim(parentPath);
    computedGeospatialDependentDataSource = HdOmniGeospatialComputedDependentDataSource::New(_wrappedDataSource,
        parentSceneIndexPrim.dataSource);
    HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, computedGeospatialDependentDataSource);
    return computedGeospatialDependentDataSource;
}
// A prim is geospatially positioned when the WGS84 local-position schema
// can be resolved from its container data source.
bool HdOmniGeospatialDataSource::_HasGeospatialInformation(HdContainerDataSourceHandle handle)
{
    return HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(handle).IsDefined();
}
PXR_NAMESPACE_CLOSE_SCOPE | 9,813 | C++ | 40.235294 | 142 | 0.689086 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/usdImaging/usdImaging/dataSourceAttribute.h>
#include "localPositionDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Binds the data source to the prim's applied
// OmniGeospatialWGS84LocalPositionAPI; stageGlobals is retained for
// time-sample resolution of the attribute.
HdOmniGeospatialWGS84LocalPositionDataSource::HdOmniGeospatialWGS84LocalPositionDataSource(
    const UsdPrim& prim,
    const UsdImagingDataSourceStageGlobals& stageGlobals) :
    _stageGlobals(stageGlobals)
{
    _localPositionApi = OmniGeospatialWGS84LocalPositionAPI(prim);
}

#if PXR_VERSION < 2302
// Pre-23.02 Hydra Has(): only the position field is served.
bool HdOmniGeospatialWGS84LocalPositionDataSource::Has(const TfToken& name)
{
    return (name == HdOmniGeospatialWGS84LocalPositionSchemaTokens->position);
}
#endif
// The single hydra attribute this data source publishes.
TfTokenVector HdOmniGeospatialWGS84LocalPositionDataSource::GetNames()
{
    return TfTokenVector { HdOmniGeospatialWGS84LocalPositionSchemaTokens->position };
}
// Resolves `name` to a sampled data source; only the position attribute
// is supported, any other name yields nullptr.
HdDataSourceBaseHandle HdOmniGeospatialWGS84LocalPositionDataSource::Get(const TfToken& name)
{
    if (name != HdOmniGeospatialWGS84LocalPositionSchemaTokens->position)
    {
        // not an attribute this data source serves
        return nullptr;
    }
    return UsdImagingDataSourceAttribute<GfVec3d>::New(
        _localPositionApi.GetPositionAttr(), _stageGlobals);
}
PXR_NAMESPACE_CLOSE_SCOPE | 1,954 | C++ | 32.135593 | 93 | 0.772262 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedPrimDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
PXR_NAMESPACE_OPEN_SCOPE
/// \class HdOmniGeospatialComputedPrimDataSource
///
/// A datasource representing a container data source mimicing
/// that of a container data source for xform data, but returning
/// computed values based on geospatial data applied to the prim.
///
class HdOmniGeospatialComputedPrimDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialComputedPrimDataSource);

    // Wraps `inputDataSource`, the prim-level container whose xform data
    // this data source replaces with geospatially computed values.
    HdOmniGeospatialComputedPrimDataSource(HdContainerDataSourceHandle inputDataSource);

    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif

private:
    // Builds the computed xform data source for this prim.
    HdDataSourceBaseHandle _ComputeGeospatialXform();
    // Coordinate conversions: geodetic (lat/lon/height) -> ECEF -> local
    // ENU tangent frame -> stage cartesian units.
    GfVec3d _GeodeticToEcef(const GfVec3d& llh) const;
    GfVec3d _EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const;
    GfVec3d _EnuToCartesian(const GfVec3d& enu, const TfToken& upAxis, const double& metersPerUnit, const GfVec3d& reference) const;

private:
    // the original (pre-geospatial) prim container being wrapped
    HdContainerDataSourceHandle _inputDataSource;
    // lazily built matrix source returned for the xform's matrix field
    HdMatrixDataSourceHandle _matrixDataSource;

    // Matrix data source that computes the geospatial transform on demand
    // from the wrapped prim's local/reference position schemas.
    class _GeospatialMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_GeospatialMatrixDataSource);
        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;
    private:
        _GeospatialMatrixDataSource(HdContainerDataSourceHandle inputDataSource);

        // accessors for the underlying typed data sources
        HdMatrixDataSourceHandle _GetMatrixSource() const;
        HdVec3dDataSourceHandle _GetLocalPositionSource() const;
        HdTokenDataSourceHandle _GetTangentPlaneSource() const;
        HdVec3dDataSourceHandle _GetReferencePositionSource() const;
        HdVec3dDataSourceHandle _GetOrientationSource() const;
        HdTokenDataSourceHandle _GetStageUpAxisSource() const;
        HdDoubleDataSourceHandle _GetStageMetersPerUnitSource() const;

        // accessors for the resolved values of the above
        GfMatrix4d _GetMatrix(const Time shutterOffset) const;
        GfVec3d _GetLocalPosition(const Time shutterOffset) const;
        TfToken _GetTangentPlane() const;
        GfVec3d _GetReferencePosition() const;
        GfVec3d _GetOrientation() const;
        TfToken _GetStageUpAxis() const;
        double _GetStageMetersPerUnit() const;

        // geospatial transform methods
        GfMatrix4d _ComputeTransformedMatrix(const Time shutterOffset) const;
        GfVec3d _GeodeticToEcef(const GfVec3d& llh) const;
        GfVec3d _EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const;
        GfVec3d _EnuToCartesian(const GfVec3d& enu, const TfToken& upAxis, const double& metersPerUnit, const GfVec3d& reference) const;

        // WGS84 ellipsoid parameters and angle-conversion factors
        struct GeoConstants
        {
            static constexpr double semiMajorAxis = 6378137.0;
            static constexpr double semiMinorAxis = 6356752.3142;
            static constexpr double flattening = 1.0 / 298.257223563;
            // NOTE(review): f * (2 - f) is the first eccentricity *squared*
            // (e^2), despite the name — confirm usage sites expect e^2
            static constexpr double eccentricity = flattening * (2 - flattening);
            static constexpr double radians = M_PI / 180.0;
            static constexpr double degrees = 180.0 / M_PI;
        };

        HdContainerDataSourceHandle _inputDataSource;
    };

    HD_DECLARE_DATASOURCE_HANDLES(_GeospatialMatrixDataSource);
};

HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialComputedPrimDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_ | 4,414 | C | 37.72807 | 136 | 0.738106 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialSceneIndex.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GEO_SCENE_INDEX_H_
#define OMNI_GEO_SCENE_INDEX_H_
#include <pxr/pxr.h>
#include <pxr/usd/sdf/pathTable.h>
#include <pxr/imaging/hd/filteringSceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniGeospatialSceneIndex);
///
/// \class OmniGeospatialSceneIndex
///
/// A scene index responsible for observing an input flattened scene
/// index and producing a comparable scene in which geospatial transforms
/// have been applied to prims with geospatial state attached to them
/// and for updating the transform of their children as needed.
///
/// Note that with Render Delegate 2.0 and the ability to pull data
/// from a non-flattened scene, this implementation will have to be
/// revisited to work with the unflattened xform representation of
/// the hydra prims.
///
class OmniGeospatialSceneIndex : public HdSingleInputFilteringSceneIndexBase
{
public:
    // Creates a new geospatial scene index filtering `inputSceneIndex`.
    OMNIGEOSCENEINDEX_API
    static OmniGeospatialSceneIndexRefPtr New(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs = nullptr);

    OMNIGEOSCENEINDEX_API
    ~OmniGeospatialSceneIndex() override;

    // Returns the (possibly wrapped) prim at `primPath`.
    OMNIGEOSCENEINDEX_API
    HdSceneIndexPrim GetPrim(const SdfPath& primPath) const override;

    // Child paths are forwarded unchanged from the input scene index.
    OMNIGEOSCENEINDEX_API
    SdfPathVector GetChildPrimPaths(const SdfPath& primPath) const override;

protected:
    OmniGeospatialSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs);

    // these three are provided by HdSingleInputFilteringSceneIndexBase
    // and must be overridden by inheritors
    virtual void _PrimsAdded(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::AddedPrimEntries& entries) override;
    virtual void _PrimsRemoved(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::RemovedPrimEntries& entries) override;
    virtual void _PrimsDirtied(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::DirtiedPrimEntries& entries) override;

private:
    // Checks whether `primPath` already has a wrapped entry in the cache;
    // the bool of the pair indicates presence.
    SdfPathTable<HdSceneIndexPrim>::_IterBoolPair _IsPrimWrapped(const SdfPath& primPath) const;
    // Inserts (or updates) a wrapped entry for `primPath` in the cache.
    HdSceneIndexPrim& _WrapPrim(const SdfPath& primPath, const HdSceneIndexPrim& hdPrim) const;
    // Propagates dirtiness for `locators` down the hierarchy rooted at
    // `primPath`, appending to `dirtyEntries`.
    void _DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators, HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries);
    /*HdContainerDataSourceHandle _ComputeDataSource(
        const SdfPath& primPath,
        const HdContainerDataSourceHandle& primDataSource) const;
    void _ComputeChildDataSources(const SdfPath& parentPath,
        const HdContainerDataSourceHandle& parentDataSource) const;
    HdContainerDataSourceHandle _ComputeMatrixDependenciesDataSource(
        const SdfPath& primPath) const;*/

private:
    // marked as mutable because it is an internal cache
    // that is written to on-demand from the GetPrim method
    // which is a const method by interface definition in HdSceneIndexBase
    mutable SdfPathTable<HdSceneIndexPrim> _wrappedPrims;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 3,668 | C | 36.438775 | 146 | 0.773446 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialWGS84ReferencePositionSchemaTokens,
HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS);
// Typed accessors for the individual fields of the reference position
// container; each returns null when the field is absent.

HdTokenDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetTangentPlane()
{
    return _GetTypedDataSource<HdTokenDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane);
}

HdVec3dDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetReferencePosition()
{
    return _GetTypedDataSource<HdVec3dDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition);
}

HdVec3dDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetOrientation()
{
    return _GetTypedDataSource<HdVec3dDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation);
}

HdTokenDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetStageUpAxis()
{
    return _GetTypedDataSource<HdTokenDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis);
}

HdDoubleDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetStageMetersPerUnit()
{
    return _GetTypedDataSource<HdDoubleDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit);
}
// Extracts this schema's container from a parent prim-level container;
// yields a schema wrapping null when the parent is null or lacks the
// referencePositionApi entry.
HdOmniGeospatialWGS84ReferencePositionSchema HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(
    const HdContainerDataSourceHandle& fromParentContainer)
{
    HdContainerDataSourceHandle container = nullptr;
    if (fromParentContainer != nullptr)
    {
        container = HdContainerDataSource::Cast(fromParentContainer->Get(
            HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi));
    }
    return HdOmniGeospatialWGS84ReferencePositionSchema(container);
}
// Returns the locator at which this schema's container is nested inside
// a prim-level data source.
const HdDataSourceLocator& HdOmniGeospatialWGS84ReferencePositionSchema::GetDefaultLocator()
{
    // function-local static: constructed once on first use
    static const HdDataSourceLocator locator(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi
    );
    return locator;
}
// Builds a retained container holding whichever of the five reference
// position fields are non-null; absent fields are simply omitted.
HdContainerDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::BuildRetained(
    const HdTokenDataSourceHandle& tangentPlane,
    const HdVec3dDataSourceHandle& referencePosition,
    const HdVec3dDataSourceHandle& orientation,
    const HdTokenDataSourceHandle& stageUpAxis,
    const HdDoubleDataSourceHandle& stageMetersPerUnit)
{
    TfToken names[5];
    HdDataSourceBaseHandle values[5];
    size_t count = 0;

    // record a (name, value) pair only when the value was provided
    auto appendIfSet = [&](const TfToken& name, const HdDataSourceBaseHandle& value)
    {
        if (value != nullptr)
        {
            names[count] = name;
            values[count] = value;
            ++count;
        }
    };

    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane, tangentPlane);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition, referencePosition);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation, orientation);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis, stageUpAxis);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit, stageMetersPerUnit);

    return HdRetainedContainerDataSource::New(count, names, values);
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,221 | C++ | 33.048387 | 105 | 0.773513 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_
#define HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_
#include <pxr/imaging/hd/schema.h>
#include <pxr/imaging/hd/dataSourceLocator.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS \
(localPositionApi) \
(position) \
TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialWGS84LocalPositionSchemaTokens, OMNIGEOSCENEINDEX_API,
HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
// Schema view over the hydra container produced for the WGS84 local
// position API; provides typed access to the position field.
class HdOmniGeospatialWGS84LocalPositionSchema : public HdSchema
{
public:
    HdOmniGeospatialWGS84LocalPositionSchema(HdContainerDataSourceHandle container)
    : HdSchema(container) { }

    // Returns the position field's data source, or null if absent.
    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetPosition();

    // Extracts this schema's container from a parent prim-level container.
    OMNIGEOSCENEINDEX_API
    static HdOmniGeospatialWGS84LocalPositionSchema GetFromParent(
        const HdContainerDataSourceHandle& fromParentContainer);

    // Locator at which this schema's container is nested in a prim.
    OMNIGEOSCENEINDEX_API
    static const HdDataSourceLocator& GetDefaultLocator();

    // Builds a retained container for the given position source.
    OMNIGEOSCENEINDEX_API
    static HdContainerDataSourceHandle BuildRetained(
        const HdVec3dDataSourceHandle& position
    );
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_ | 1,985 | C | 32.661016 | 95 | 0.716877 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/usdImaging/usdImaging/dataSourceStageGlobals.h>
#include <omniGeospatial/wGS84ReferencePositionAPI.h>
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Container data source exposing the WGS84 reference position API
// attributes of a USD prim (plus stage up-axis and meters-per-unit
// metadata) to hydra.
class HdOmniGeospatialWGS84ReferencePositionDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialWGS84ReferencePositionDataSource);

    // `stageGlobals` is held by reference and must outlive this object.
    HdOmniGeospatialWGS84ReferencePositionDataSource(const UsdPrim& prim,
        const UsdImagingDataSourceStageGlobals& stageGlobals);

    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif

private:
    // applied API schema wrapper for the prim's reference position attrs
    OmniGeospatialWGS84ReferencePositionAPI _referencePositionApi;
    const UsdImagingDataSourceStageGlobals& _stageGlobals;

    // Sampled data source returning a fixed, time-invariant value that
    // was captured from stage metadata (up axis, meters per unit).
    template <typename T>
    class _StageDataSource : public HdTypedSampledDataSource<T>
    {
    public:
        HD_DECLARE_DATASOURCE(_StageDataSource<T>);

        VtValue GetValue(HdSampledDataSource::Time shutterOffset) override
        {
            return VtValue(GetTypedValue(shutterOffset));
        }

        T GetTypedValue(HdSampledDataSource::Time shutterOffset) override
        {
            // value is constant, so the shutter offset is ignored
            return _value;
        }

        bool GetContributingSampleTimesForInterval(
            HdSampledDataSource::Time startTime,
            HdSampledDataSource::Time endTime,
            std::vector<HdSampledDataSource::Time>* outSampleTimes) override
        {
            // constant value: no time samples contribute
            return false;
        }

    private:
        _StageDataSource(const T& value);

        T _value;
    };
};

HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialWGS84ReferencePositionDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_ | 2,546 | C | 30.060975 | 85 | 0.739199 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionAPIAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include <omniGeospatial/wGS84ReferencePositionAPI.h>
#include "referencePositionAPIAdapter.h"
#include "referencePositionDataSource.h"
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Registers the adapter with the TfType system and installs the factory
// used to instantiate it for prims carrying the API schema.
TF_REGISTRY_FUNCTION(TfType)
{
    typedef OmniGeospatialWGS84ReferencePositionAPIAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter> >();
}
// Builds the hydra data source contributed by the reference position API
// schema for `prim`. The parameter order of this virtual differs between
// USD versions, hence the preprocessor-selected signatures.
#if PXR_VERSION >= 2302
HdContainerDataSourceHandle OmniGeospatialWGS84ReferencePositionAPIAdapter::GetImagingSubprimData(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#else
HdContainerDataSourceHandle OmniGeospatialWGS84ReferencePositionAPIAdapter::GetImagingSubprimData(
    const TfToken& subprim,
    const UsdPrim& prim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#endif
{
    // at the point we are invoked here, the stage scene index has already determined
    // that the API schema applies to the prim, so we can safely create our
    // data source
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        // there shouldn't be a subprim or an applied instance name
        // if there is, we don't really know what to do with it
        // so we return null to indicate there is no data source
        // for this prim setup
        return nullptr;
    }
    // nest the data source under the schema container token so consumers
    // can retrieve it via HdOmniGeospatialWGS84ReferencePositionSchema
    return HdRetainedContainerDataSource::New(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi,
        HdOmniGeospatialWGS84ReferencePositionDataSource::New(prim, stageGlobals)
    );
}
// Maps changed USD property names to the hydra locators they dirty.
// Any property under the schema's attribute namespace invalidates the
// whole reference position container.
#if PXR_VERSION >= 2302
HdDataSourceLocatorSet OmniGeospatialWGS84ReferencePositionAPIAdapter::InvalidateImagingSubprim(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#else
HdDataSourceLocatorSet OmniGeospatialWGS84ReferencePositionAPIAdapter::InvalidateImagingSubprim(
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#endif
{
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        // only the prim itself carries this schema's data
        return HdDataSourceLocatorSet();
    }
    TfToken geospatialPrefix("omni:geospatial:wgs84:reference");
    for (const TfToken& propertyName : properties)
    {
        if (TfStringStartsWith(propertyName, geospatialPrefix))
        {
            // one matching property is enough: dirty the whole container
            return HdOmniGeospatialWGS84ReferencePositionSchema::GetDefaultLocator();
        }
    }
    // no geospatial property changed, nothing to invalidate
    return HdDataSourceLocatorSet();
}
PXR_NAMESPACE_CLOSE_SCOPE | 3,306 | C++ | 33.810526 | 98 | 0.753781 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
#include <pxr/imaging/hd/sceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNIGEOSPATIALDATASOURCE_TOKENS \
(geospatialPreservedXform)
TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialDataSourceTokens, OMNIGEOSCENEINDEX_API,
HDOMNIGEOSPATIALDATASOURCE_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniGeospatialDataSource
///
/// A datasource representing a wrapped view of an existing flattened
/// data source where the xform token is intercepted and a new geospatial
/// matrix dynamically calculated.
///
/// \class HdOmniGeospatialDataSource
///
/// A datasource representing a wrapped view of an existing flattened
/// data source where the xform token is intercepted and a new geospatial
/// matrix dynamically calculated.
///
class HdOmniGeospatialDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialDataSource);

    // Wraps `wrappedDataSource`, the flattened container for the prim at
    // `primPath` within `sceneIndex`.
    HdOmniGeospatialDataSource(const HdSceneIndexBase& sceneIndex, const SdfPath& primPath,
        HdContainerDataSourceHandle wrappedDataSource);

    // Replaces the wrapped container (e.g. after the prim was re-added).
    void UpdateWrappedDataSource(HdContainerDataSourceHandle wrappedDataSource);

    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif

    // determines if the data source would be dirtied based on the locators given
    bool IsPrimDirtied(const HdDataSourceLocatorSet& locators);

private:
    // True when `dataSource` carries WGS84 local position data.
    bool _HasGeospatialInformation(HdContainerDataSourceHandle dataSource);
    // Builds (or returns cached) computed geospatial xform data.
    HdDataSourceBaseHandle _ComputeGeospatialXform();

private:
    const HdSceneIndexBase& _sceneIndex;
    SdfPath _primPath;
    HdContainerDataSourceHandle _wrappedDataSource;

    // cached computed datasources
    HdContainerDataSourceAtomicHandle _computedGeospatialPrimDataSource;
    HdContainerDataSourceAtomicHandle _computedGeospatialDependentDataSource;
};

HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_ | 2,737 | C | 31.987951 | 91 | 0.739496 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionAPIAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include <omniGeospatial/wGS84LocalPositionAPI.h>
#include <omniGeospatial/wGS84ReferencePositionAPI.h>
#include "localPositionAPIAdapter.h"
#include "localPositionDataSource.h"
#include "localPositionSchema.h"
#include "referencePositionDataSource.h"
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Registers the adapter with the TfType system and installs the factory
// used to instantiate it for prims carrying the API schema.
TF_REGISTRY_FUNCTION(TfType)
{
    typedef OmniGeospatialWGS84LocalPositionAPIAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter> >();
}
// Builds the hydra data source contributed by the local position API
// schema for `prim`, baking in the nearest ancestor's reference position
// ("geodetic root") so the scene index need not walk parents later.
// The parameter order of this virtual differs between USD versions,
// hence the preprocessor-selected signatures.
#if PXR_VERSION >= 2302
HdContainerDataSourceHandle OmniGeospatialWGS84LocalPositionAPIAdapter::GetImagingSubprimData(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#else
HdContainerDataSourceHandle OmniGeospatialWGS84LocalPositionAPIAdapter::GetImagingSubprimData(
    const TfToken& subprim,
    const UsdPrim& prim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#endif
{
    // at the point we are invoked here, the stage scene index has already determined
    // that the API schema applies to the prim, so we can safely create our
    // data source
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        // there shouldn't be a subprim or an applied instance name
        // if there is, we don't really know what to do with it
        // so we return null to indicate there is no data source
        // for this prim setup
        return nullptr;
    }
    // to make it a bit easier, we will traverse the parent structure here to find a geodetic root
    // rather than traversing it in the scene index - this is because we have all of the information
    // we need at the point where this prim is getting processed
    HdDataSourceBaseHandle referencePositionDataSource = nullptr;
    for (UsdPrim parentPrim = prim; !parentPrim.IsPseudoRoot(); parentPrim = parentPrim.GetParent())
    {
        if (parentPrim.HasAPI<OmniGeospatialWGS84ReferencePositionAPI>())
        {
            // bake the geodetic root information into this local prim
            referencePositionDataSource = HdOmniGeospatialWGS84ReferencePositionDataSource::New(parentPrim, stageGlobals);
            break;
        }
    }
    // only process local position if we found a geodetic root - if we didn't
    // it means that this is an unrooted local position so we keep whatever
    // transform information the prim would have had otherwise
    if (referencePositionDataSource != nullptr)
    {
        return HdRetainedContainerDataSource::New(
            HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi,
            HdOmniGeospatialWGS84LocalPositionDataSource::New(prim, stageGlobals),
            HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi,
            referencePositionDataSource
        );
    }
    return nullptr;
}
// Maps changed USD property names to the hydra locators they dirty.
// Any property under the schema's attribute namespace invalidates the
// whole local position container.
#if PXR_VERSION >= 2302
HdDataSourceLocatorSet OmniGeospatialWGS84LocalPositionAPIAdapter::InvalidateImagingSubprim(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#else
HdDataSourceLocatorSet OmniGeospatialWGS84LocalPositionAPIAdapter::InvalidateImagingSubprim(
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#endif
{
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        // only the prim itself carries this schema's data
        return HdDataSourceLocatorSet();
    }
    TfToken geospatialPrefix("omni:geospatial:wgs84:local");
    for (const TfToken& propertyName : properties)
    {
        if (TfStringStartsWith(propertyName, geospatialPrefix))
        {
            // one matching property is enough: dirty the whole container
            return HdOmniGeospatialWGS84LocalPositionSchema::GetDefaultLocator();
        }
    }
    // no geospatial property changed, nothing to invalidate
    return HdDataSourceLocatorSet();
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,574 | C++ | 36.809917 | 122 | 0.740927 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/usd/usdGeom/metrics.h>
#include <pxr/usd/usdGeom/tokens.h>
#include <pxr/usdImaging/usdImaging/dataSourceAttribute.h>
#include "referencePositionDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Constructs a data source exposing the WGS84 reference position
// attributes of `prim` to hydra.
//
// `stageGlobals` is held by reference for time-sampled attribute reads;
// the caller must keep it alive for this data source's lifetime.
HdOmniGeospatialWGS84ReferencePositionDataSource::HdOmniGeospatialWGS84ReferencePositionDataSource(
    const UsdPrim& prim,
    const UsdImagingDataSourceStageGlobals& stageGlobals) :
    _stageGlobals(stageGlobals)
{
    _referencePositionApi = OmniGeospatialWGS84ReferencePositionAPI(prim);
}
#if PXR_VERSION < 2302
// Pre-23.02 hydra queries data sources via Has(); report support for the
// five reference position fields only.
bool HdOmniGeospatialWGS84ReferencePositionDataSource::Has(const TfToken& name)
{
    return (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane) ||
        (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition) ||
        (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation) ||
        (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis) ||
        (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit);
}
#endif
// Advertises the hydra attribute names this data source is responsible for.
TfTokenVector HdOmniGeospatialWGS84ReferencePositionDataSource::GetNames()
{
    return TfTokenVector{
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit
    };
}
// Resolves `name` to a sampled data source for one of the five reference
// position fields; returns null for unsupported names.
HdDataSourceBaseHandle HdOmniGeospatialWGS84ReferencePositionDataSource::Get(const TfToken& name)
{
    // retrieves the data source values for the attributes this data source
    // supports
    if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane)
    {
        return UsdImagingDataSourceAttribute<TfToken>::New(
            _referencePositionApi.GetTangentPlaneAttr(), _stageGlobals);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition)
    {
        return UsdImagingDataSourceAttribute<GfVec3d>::New(
            _referencePositionApi.GetReferencePositionAttr(), _stageGlobals);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation)
    {
        return UsdImagingDataSourceAttribute<GfVec3d>::New(
            _referencePositionApi.GetOrientationAttr(), _stageGlobals);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis)
    {
        // stage metadata rather than a prim attribute: defaults to Y when
        // the prim has no stage to query
        TfToken upAxis = UsdGeomTokens->y;
        UsdStageWeakPtr stage = _referencePositionApi.GetPrim().GetStage();
        if (stage != nullptr)
        {
            upAxis = UsdGeomGetStageUpAxis(stage);
        }
        return _StageDataSource<TfToken>::New(upAxis);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit)
    {
        // stage metadata rather than a prim attribute: defaults to 0.01
        // (centimeters) when the prim has no stage to query
        double mpu = 0.01;
        UsdStageWeakPtr stage = _referencePositionApi.GetPrim().GetStage();
        if (stage != nullptr)
        {
            mpu = UsdGeomGetStageMetersPerUnit(stage);
        }
        return _StageDataSource<double>::New(mpu);
    }
    // this is a name we don't support
    return nullptr;
}
// Stores the immutable stage-derived value this data source returns for
// every sample time.
template <typename T>
HdOmniGeospatialWGS84ReferencePositionDataSource::_StageDataSource<T>::_StageDataSource(const T& value) : _value(value)
{
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,155 | C++ | 38.207547 | 119 | 0.754513 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionAPIAdapter.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_
#define OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_
#include <pxr/pxr.h>
#include <pxr/usdImaging/usdImaging/apiSchemaAdapter.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
// API schema adapter that contributes Hydra data sources for prims carrying
// the WGS84 reference position API schema.  The PXR_VERSION branches track
// the UsdImagingAPISchemaAdapter virtual signature change in USD 23.02.
class OmniGeospatialWGS84ReferencePositionAPIAdapter : public UsdImagingAPISchemaAdapter
{
public:
    using BaseAdapter = UsdImagingAPISchemaAdapter;
#if PXR_VERSION >= 2302
    // Returns the container data source contributed for the given subprim,
    // or nullptr when this adapter has nothing to add.
    OMNIGEOSCENEINDEX_API
    HdContainerDataSourceHandle GetImagingSubprimData(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const UsdImagingDataSourceStageGlobals& stageGlobals
    ) override;
#else
    OMNIGEOSCENEINDEX_API
    HdContainerDataSourceHandle GetImagingSubprimData(
        const TfToken& subprim,
        const UsdPrim& prim,
        const TfToken& appliedInstanceName,
        const UsdImagingDataSourceStageGlobals& stageGlobals
    ) override;
#endif
#if PXR_VERSION >= 2302
    // Maps changed USD properties to the Hydra data source locators that
    // must be invalidated for the given subprim.
    OMNIGEOSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const TfTokenVector& properties
    ) override;
#else
    // NOTE(review): unlike GetImagingSubprimData above, this pre-23.02
    // overload takes no UsdPrim argument -- confirm it matches the base
    // class signature for the targeted USD version.
    OMNIGEOSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const TfTokenVector& properties
    ) override;
#endif
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_ | 2,144 | C | 30.544117 | 88 | 0.747201 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedDependentDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/xformSchema.h>
#include "geospatialDataSource.h"
#include "computedDependentDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Wraps a prim's container data source together with its parent's so the
// xform matrix can be recomputed from the parent's geospatially adjusted
// transform; the computed matrix data source is built once up front.
HdOmniGeospatialComputedDependentDataSource::HdOmniGeospatialComputedDependentDataSource(
    HdContainerDataSourceHandle inputDataSource,
    HdContainerDataSourceHandle parentDataSource) :
    _inputDataSource(inputDataSource),
    _parentDataSource(parentDataSource)
{
    _matrixDataSource =
        HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::New(
            _inputDataSource, parentDataSource);
}
#if PXR_VERSION < 2302
// Pre-23.02 containment query: this container only answers for the two
// xform schema members.
bool HdOmniGeospatialComputedDependentDataSource::Has(const TfToken& name)
{
    return (name == HdXformSchemaTokens->resetXformStack) ||
        (name == HdXformSchemaTokens->matrix);
}
#endif
// Enumerates the members exposed by this container: exactly the two
// xform schema entries.
TfTokenVector HdOmniGeospatialComputedDependentDataSource::GetNames()
{
    return TfTokenVector {
        HdXformSchemaTokens->resetXformStack,
        HdXformSchemaTokens->matrix
    };
}
// Returns the data source for the requested xform member: resetXformStack
// is passed straight through from the wrapped input, while matrix is
// served by the computed matrix data source built in the constructor.
HdDataSourceBaseHandle HdOmniGeospatialComputedDependentDataSource::Get(const TfToken& name)
{
    if (_inputDataSource != nullptr)
    {
        if (name == HdXformSchemaTokens->resetXformStack)
        {
            // we don't modify the underlying time-sampled data
            // for resetXformStack, so return that directly
            HdXformSchema xformSchema = HdXformSchema::GetFromParent(_inputDataSource);
            return xformSchema.IsDefined() ? xformSchema.GetResetXformStack() : nullptr;
        }
        else if (name == HdXformSchemaTokens->matrix)
        {
            return _matrixDataSource;
        }
    }
    return nullptr;
}
// Captures the wrapped prim's container and its parent's; both are needed
// to rebuild the child's flattened transform.
HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::_GeospatiallyAffectedMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource,
    HdContainerDataSourceHandle parentDataSource) :
    _inputDataSource(inputDataSource),
    _parentDataSource(parentDataSource)
{
}

// Type-erased access: boxes the typed matrix value.
VtValue HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}

// Computes the geospatially adjusted flattened matrix at the given offset.
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return this->_ComputeTransformedMatrix(shutterOffset);
}
// Merges the sample times contributed by this prim's matrix and its
// parent's matrix over [startTime, endTime], since the computed matrix
// depends on both sources.
bool HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle sources[] = {
        this->_GetMatrixSource(),
        this->_GetParentMatrixSource()
    };
    return HdGetMergedContributingSampleTimesForInterval(
        TfArraySize(sources),
        sources,
        startTime,
        endTime,
        outSampleTimes);
}
// Matrix data source of the wrapped prim (its original flattened matrix).
HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetMatrixSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetMatrix();
}

// resetXformStack flag data source of the wrapped prim.
HdBoolDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetResetXformStackSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetResetXformStack();
}

// Matrix data source of the parent as currently exposed (geospatially
// recomputed when the parent is wrapped).
HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetParentMatrixSource() const
{
    return HdXformSchema::GetFromParent(_parentDataSource).GetMatrix();
}

// Matrix data source holding the parent's pre-geospatial (original
// flattened) matrix, preserved by HdOmniGeospatialDataSource.
HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetParentOriginalMatrixSource() const
{
    // the parent data source here should be a geospatial data source
    // but in the event it is not, this method will simply return the same
    // matrix as that of _GetParentMatrixSource
    HdOmniGeospatialDataSourceHandle geospatialDataSource =
        HdOmniGeospatialDataSource::Cast(_parentDataSource);
    if (geospatialDataSource != nullptr)
    {
        HdContainerDataSourceHandle xformDataSource =
            HdContainerDataSource::Cast(
                geospatialDataSource->Get(HdOmniGeospatialDataSourceTokens->geospatialPreservedXform));
        if (xformDataSource == nullptr)
        {
            TF_WARN("Parent data source could not retrieve preserved xform!");
            return this->_GetParentMatrixSource();
        }
        HdMatrixDataSourceHandle matrixDataSource = HdMatrixDataSource::Cast(
            xformDataSource->Get(HdXformSchemaTokens->matrix));
        if (matrixDataSource == nullptr)
        {
            TF_WARN("Xform schema not defined on preserved container data source!");
        }
        return (matrixDataSource != nullptr) ? matrixDataSource : this->_GetParentMatrixSource();
    }
    else
    {
        TF_WARN("Parent data source has no geospatial data source!");
    }
    return this->_GetParentMatrixSource();
}
// Samples the wrapped prim's matrix; identity when no source is present.
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }
    return GfMatrix4d(1.0);
}

// Samples the wrapped prim's resetXformStack flag; false when absent.
bool HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetResetXformStack(const Time shutterOffset) const
{
    HdBoolDataSourceHandle dataSource = this->_GetResetXformStackSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }
    return false;
}

// Samples the parent's current (possibly recomputed) matrix; identity when absent.
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetParentMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetParentMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }
    return GfMatrix4d(1.0);
}

// Samples the parent's original (pre-geospatial) matrix; identity when absent.
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_GetParentOriginalMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetParentOriginalMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }
    return GfMatrix4d(1.0);
}
// Recomputes this prim's flattened matrix from its geospatially adjusted
// parent: the prim's local transform is recovered by dividing its original
// flattened matrix by the parent's original matrix, then the parent's new
// flattened matrix is re-applied.  When resetXformStack is set the prim
// ignores its parent, so the original matrix is returned unchanged.
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
    _GeospatiallyAffectedMatrixDataSource::_ComputeTransformedMatrix(const Time shutterOffset) const
{
    // this prim did not have geospatial information applied to it,
    // but it is the child of one that did, so we compute the updated
    // value based on the recomputed value of the parent
    // however, we actually only want to do this if this prim does
    // not have a resetXformStack applied
    bool resetXformStack = this->_GetResetXformStack(shutterOffset);
    if (!resetXformStack)
    {
        // to compute the affected matrix, we first need to acquire the parent information
        GfMatrix4d flattenedParentTransform = this->_GetParentMatrix(shutterOffset);
        GfMatrix4d originalParentTransform = this->_GetParentOriginalMatrix(shutterOffset);
        // since we are dealing with flattened transformations, we have to recover
        // the local transform of the prim data source in question
        // we can do this by knowing the prim's flattened transform
        // and the original transform of its parent (the _dependsOnDataSource)
        // Let FT be the flattened transform, P be the transform of the parent,
        // and LT be the child's local transform. The flattened transform would
        // then have been computed as FT = (P)(LT), thus to recover LT we divide
        // out by P, which results in LT = (FT) / (P) = FT * (P)^-1
        // so we need the inverse of the original parent transform
        GfMatrix4d inverseParentTransform = originalParentTransform.GetInverse();
        GfMatrix4d originalChildTransform = this->_GetMatrix(shutterOffset);
        GfMatrix4d childLocalTransform = originalChildTransform * inverseParentTransform;
        // once we have the local transform, we can re-apply the new
        // flattened parent transform - this is the new geospatially affected transform
        // of the child
        return flattenedParentTransform * childLocalTransform;
    }
    // if resetXformStack was true, the original flattened transform of
    // of the input data source is valid here and we don't recompute
    return this->_GetMatrix(shutterOffset);
}
PXR_NAMESPACE_CLOSE_SCOPE | 9,285 | C++ | 35.996016 | 128 | 0.74238 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include "localPositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialWGS84LocalPositionSchemaTokens,
    HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS);

// Returns the data source holding the WGS84 local position value, or null
// when the container does not carry one.
HdVec3dDataSourceHandle HdOmniGeospatialWGS84LocalPositionSchema::GetPosition()
{
    return _GetTypedDataSource<HdVec3dDataSource>(
        HdOmniGeospatialWGS84LocalPositionSchemaTokens->position);
}
// Locates this schema's container on a parent prim data source.  A null
// parent yields a schema wrapping a null container; otherwise the nested
// localPositionApi container is looked up on the parent.
HdOmniGeospatialWGS84LocalPositionSchema HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(
    const HdContainerDataSourceHandle& fromParentContainer)
{
    HdContainerDataSourceHandle container = nullptr;
    if (fromParentContainer != nullptr)
    {
        container = HdContainerDataSource::Cast(
            fromParentContainer->Get(
                HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi));
    }
    return HdOmniGeospatialWGS84LocalPositionSchema(container);
}
// Locator at which this schema's container is found on a prim data source.
const HdDataSourceLocator& HdOmniGeospatialWGS84LocalPositionSchema::GetDefaultLocator()
{
    static const HdDataSourceLocator locator(
        HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi
    );
    return locator;
}
// Builds a retained container carrying the position entry; a null position
// produces an empty container.
HdContainerDataSourceHandle HdOmniGeospatialWGS84LocalPositionSchema::BuildRetained(
    const HdVec3dDataSourceHandle& position)
{
    size_t count = 0;
    TfToken names[1];
    HdDataSourceBaseHandle values[1];
    if (position != nullptr)
    {
        names[0] = HdOmniGeospatialWGS84LocalPositionSchemaTokens->position;
        values[0] = position;
        count = 1;
    }
    return HdRetainedContainerDataSource::New(count, names, values);
}
PXR_NAMESPACE_CLOSE_SCOPE | 2,240 | C++ | 31.955882 | 97 | 0.775 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialSceneIndex.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/work/utils.h>
#include <pxr/imaging/hd/xformSchema.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/imaging/hd/overlayContainerDataSource.h>
#include <pxr/imaging/hd/dependenciesSchema.h>
#include "geospatialSceneIndex.h"
#include "referencePositionSchema.h"
#include "localPositionSchema.h"
#include "geospatialDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PRIVATE_TOKENS(
    _tokens,
    (positionToXform)
);

// Factory: creates a ref-counted geospatial scene index filtering the
// given input scene index.  inputArgs is currently unused.
OmniGeospatialSceneIndexRefPtr OmniGeospatialSceneIndex::New(
    const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs)
{
    return TfCreateRefPtr(new OmniGeospatialSceneIndex(inputSceneIndex, inputArgs));
}

OmniGeospatialSceneIndex::OmniGeospatialSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs) :
    HdSingleInputFilteringSceneIndexBase(inputSceneIndex)
{
}

OmniGeospatialSceneIndex::~OmniGeospatialSceneIndex() = default;
// Returns the scene index prim at primPath, lazily wrapping its data
// source in a geospatial data source the first time an xformable prim
// (without resetXformStack) is requested; wrappers are cached in
// _wrappedPrims.
HdSceneIndexPrim OmniGeospatialSceneIndex::GetPrim(const SdfPath &primPath) const
{
    // lookup the prim to see if we have wrapped it yet
    auto iterBoolPair = this->_IsPrimWrapped(primPath);
    if (iterBoolPair.second)
    {
        // we have it wrapped already, so return the wrapped prim
        return iterBoolPair.first->second;
    }

    // we haven't wrapped it yet, but we only need to wrap it
    // if it is Xformable - geospatial transforms have the potential
    // to affect anything that has a transform, so even if it is
    // never affected (e.g. resetXform is true or it is not the child
    // of a geospatially applied prim) we wrap it here for simplicity
    // sake at the cost of an extra HdSceneIndexPrim (as in some cases
    // it will even retain its original data source)
    // note that unlike the flattening scene index we wrap lazily
    // instead of walking the tree at construction time - this is because
    // there is a low chance of geospatial information being attached
    // to a prim and in cases where the scene isn't geospatially grounded
    // but the scene index is still applied we don't want to walk the
    // whole scene
    HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(primPath);
    HdXformSchema xformSchema = HdXformSchema::GetFromParent(sceneIndexPrim.dataSource);
    if (xformSchema.IsDefined() && !xformSchema.GetResetXformStack())
    {
        return this->_WrapPrim(primPath, sceneIndexPrim);
    }

    // otherwise we don't need to wrap it and can return it directly
    return sceneIndexPrim;
}
// Pass-through: this filter never changes scene topology.
SdfPathVector OmniGeospatialSceneIndex::GetChildPrimPaths(const SdfPath& primPath) const
{
    // no change in topology occurs as part of this scene index
    // so we can ask the input scene to get the child prim paths directly
    return this->_GetInputSceneIndex()->GetChildPrimPaths(primPath);
}
// Looks up primPath in the wrapper cache.  Returns the table iterator and
// whether a genuine wrapper exists there.  SdfPathTable auto-inserts empty
// entries for every ancestor path when a descendant is inserted, so merely
// finding the path is not enough: an entry only counts as wrapped when it
// carries a prim type or a data source.
SdfPathTable<HdSceneIndexPrim>::_IterBoolPair OmniGeospatialSceneIndex::_IsPrimWrapped(const SdfPath& primPath) const
{
    const auto it = _wrappedPrims.find(primPath);
    const bool wrapped =
        it != _wrappedPrims.end() &&
        (it->second.primType != TfToken() || it->second.dataSource != nullptr);
    return std::make_pair(it, wrapped);
}
// Inserts (or completes an auto-inserted) cache entry for primPath whose
// data source wraps the original one with HdOmniGeospatialDataSource,
// allowing the xform to be intercepted while preserving the original.
HdSceneIndexPrim& OmniGeospatialSceneIndex::_WrapPrim(const SdfPath& primPath, const HdSceneIndexPrim& hdPrim) const
{
    // PRECONDITION: The table must not yet contain a wrapped prim, check via _IsPrimWrapped first!
    // wrapping a scene index prim involves creating our geospatial data source to wrap the original
    // scene index prim's data source - this will allow us to intercept the xform token to return
    // a compute geospatial transform and still provide access to the original xform via the wrapped data source
    HdContainerDataSourceHandle wrappedDataSource = HdOmniGeospatialDataSource::New(*this, primPath, hdPrim.dataSource);
    const auto it = _wrappedPrims.find(primPath);
    if (it != _wrappedPrims.end())
    {
        // in this case, the entry is there, but it was auto-created
        // by SdfPathTable, meaning it should have empty entries
        TF_VERIFY(it->second.primType == TfToken());
        TF_VERIFY(it->second.dataSource == nullptr);
        it->second.primType = hdPrim.primType;
        it->second.dataSource = std::move(wrappedDataSource);
        return it->second;
    }
    else
    {
        auto iterBoolPair = _wrappedPrims.insert(
            {
                primPath,
                HdSceneIndexPrim {
                    hdPrim.primType,
                    std::move(wrappedDataSource)
                }
            }
        );
        return iterBoolPair.first->second;
    }
}
// Observer callback: wraps (or refreshes) added xformable prims, dirties
// any cached descendants when an existing wrapper is updated, and forwards
// the notifications downstream.
void OmniGeospatialSceneIndex::_PrimsAdded(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::AddedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for(const HdSceneIndexObserver::AddedPrimEntry& entry : entries)
    {
        HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(entry.primPath);

        // cache the prim if necessary
        HdXformSchema xformSchema = HdXformSchema::GetFromParent(sceneIndexPrim.dataSource)
        if (xformSchema.IsDefined() && !xformSchema.GetResetXformStack())
        {
            auto iterBoolPair = this->_IsPrimWrapped(entry.primPath);
            if (iterBoolPair.second)
            {
                /// we already wrapped this prim, so we need to update it
                HdSceneIndexPrim& wrappedPrim = iterBoolPair.first->second;
                wrappedPrim.primType = entry.primType;
                if (wrappedPrim.dataSource != nullptr)
                {
                    HdOmniGeospatialDataSource::Cast(wrappedPrim.dataSource)->UpdateWrappedDataSource(sceneIndexPrim.dataSource);
                }

                // if we updated it, we have to now see if we need
                // to dirty any cached values already in the hierarchy
                static HdDataSourceLocatorSet locators = {
                    HdXformSchema::GetDefaultLocator()
                };
                this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
            }
            else
            {
                // we don't yet have this prim wrapped - do so now
                this->_WrapPrim(entry.primPath, sceneIndexPrim);
            }
        }
    }

    // forward on the notification
    this->_SendPrimsAdded(entries);

    // also, if we had to dirty entries because of an insertion in the middle
    // of the stage hierarchy, send those along too
    if (!dirtyEntries.empty())
    {
        this->_SendPrimsDirtied(dirtyEntries);
    }
}
// Observer callback: drops cached wrappers for removed prims.  A removal
// at the absolute root clears the whole table; otherwise the subtree under
// each removed path is destroyed (data sources released asynchronously)
// and erased.  The notification is forwarded unchanged.
void OmniGeospatialSceneIndex::_PrimsRemoved(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::RemovedPrimEntries& entries)
{
    for (const HdSceneIndexObserver::RemovedPrimEntry& entry : entries)
    {
        if (entry.primPath.IsAbsoluteRootPath())
        {
            // removing the whole scene
            _wrappedPrims.ClearInParallel();
            TfReset(_wrappedPrims);
        }
        else
        {
            auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(entry.primPath);
            for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second; it++)
            {
                WorkSwapDestroyAsync(it->second.dataSource);
            }

            if(startEndRangeIterator.first != startEndRangeIterator.second)
            {
                _wrappedPrims.erase(startEndRangeIterator.first);
            }
        }
    }

    _SendPrimsRemoved(entries);
}
// Observer callback: when an xform is dirtied, invalidates the cached
// wrappers of the affected subtree and forwards both the original and any
// additional dirty notifications downstream.
void OmniGeospatialSceneIndex::_PrimsDirtied(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::DirtiedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for (const HdSceneIndexObserver::DirtiedPrimEntry& entry : entries)
    {
        HdDataSourceLocatorSet locators;
        if (entry.dirtyLocators.Intersects(HdXformSchema::GetDefaultLocator()))
        {
            locators.insert(HdXformSchema::GetDefaultLocator());
        }

        if (!locators.IsEmpty())
        {
            this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
        }
    }

    _SendPrimsDirtied(entries);
    if (!dirtyEntries.empty())
    {
        _SendPrimsDirtied(dirtyEntries);
    }
}
// Collects dirty notifications for every cached (wrapped) prim in the
// subtree rooted at primPath whose cached data is invalidated by the given
// locators.
//
// IsPrimDirtied is invoked on each wrapped descendant for its side effect
// of flushing that prim's cached values; only descendants other than
// primPath itself are appended to dirtyEntries (the root's own dirty entry
// is forwarded by the caller).
//
// Cleanup: the original loop repeated `it++` in all three branches (two of
// them empty no-op else-arms); the increment now lives in the for header
// with guard clauses - behavior is unchanged.
void OmniGeospatialSceneIndex::_DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators,
    HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries)
{
    // find subtree range retrieves a start end pair of children
    // in the subtree of the given prim path
    auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(primPath);
    for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second; ++it)
    {
        // entries without a data source are placeholders auto-created by
        // SdfPathTable for ancestor paths - nothing to invalidate there
        if (it->second.dataSource == nullptr)
        {
            continue;
        }

        // IsPrimDirtied must run before the primPath comparison so the
        // wrapper's cached values are flushed even for the root entry
        HdOmniGeospatialDataSourceHandle geospatialDataSource =
            HdOmniGeospatialDataSource::Cast(it->second.dataSource);
        if (geospatialDataSource != nullptr &&
            geospatialDataSource->IsPrimDirtied(locators) &&
            it->first != primPath)
        {
            dirtyEntries->emplace_back(it->first, locators);
        }
    }
}
PXR_NAMESPACE_CLOSE_SCOPE | 10,650 | C++ | 36.37193 | 129 | 0.665446 |
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/fileFormat/edfFileFormat/edfFileFormat.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "edfFileFormat.h"
#include "edfData.h"
PXR_NAMESPACE_OPEN_SCOPE
// Registers the format's identity (id/version/target/extension tokens)
// with the SdfFileFormat base class.
EdfFileFormat::EdfFileFormat() : SdfFileFormat(
    EdfFileFormatTokens->Id,
    EdfFileFormatTokens->Version,
    EdfFileFormatTokens->Target,
    EdfFileFormatTokens->Extension)
{
}

EdfFileFormat::~EdfFileFormat()
{
}

// Every .edf asset is considered readable; actual work happens in Read.
bool EdfFileFormat::CanRead(const std::string& filePath) const
{
    return true;
}
// Reads the dynamic layer: builds an EdfData backend from the layer's file
// format arguments, asks the data provider to read its content, and on
// success installs the data on the layer and marks it read-only.
bool EdfFileFormat::Read(SdfLayer* layer, const std::string& resolvedPath, bool metadataOnly) const
{
    // these macros emit methods defined in the Pixar namespace
    // but not properly scoped, so we have to use the namespace
    // locally here - note this isn't strictly true since we had to open
    // the namespace scope anyway because the macros won't allow non-Pixar namespaces
    // to be used because of some auto-generated content
    PXR_NAMESPACE_USING_DIRECTIVE
    if (!TF_VERIFY(layer))
    {
        return false;
    }

    // construct the SdfAbstractData object from the file format args
    // and set that as the layer data - note this is a different object
    // from that constructed in the InitData method - this may or may
    // not be an issue, something to be investigated in more detail when
    // working through the backend - either way we associate it with the layer
    // so we always have a mapping from the dynamic layer and the specific
    // set of parameters that created it
    const FileFormatArguments& args = layer->GetFileFormatArguments();
    SdfAbstractDataRefPtr layerData = this->InitData(args);

    // inform the data provider that it's time to read the content
    // this is a good time for it to cache data that it needs to generate
    // the prim / property specs when asked for them via the data apis
    EdfData& edfData = dynamic_cast<EdfData&>(*layerData);
    bool readSuccess = edfData.Read();
    if (readSuccess)
    {
        this->_SetLayerData(layer, layerData);

        // for now, this is dynamic content read one way from a source external system
        // therefore we mark that the layer is read-only
        // later we will remove this restriction and explore what it means to edit
        // data that is sourced from external data formats
        layer->SetPermissionToSave(false);
        layer->SetPermissionToEdit(false);
    }

    return readSuccess;
}
// Writing is unsupported for this read-only dynamic format.
bool EdfFileFormat::WriteToString(const SdfLayer& layer, std::string* str, const std::string& comment) const
{
    // this POC doesn't support writing
    return false;
}

bool EdfFileFormat::WriteToStream(const SdfSpecHandle& spec, std::ostream& out, size_t indent) const
{
    // this POC doesn't support writing
    return false;
}
// Builds the SdfAbstractData backend from the file format arguments that
// parameterize this dynamic layer.
SdfAbstractDataRefPtr EdfFileFormat::InitData(const FileFormatArguments& args) const
{
    // create the data parameters object to capture what data was used to create the layer
    EdfDataParameters parameters = EdfDataParameters::FromFileFormatArgs(args);
    return EdfData::CreateFromParameters(parameters);
}

// Anonymous layers hold dynamic content here, so they are reloaded and
// read like any other layer.
bool EdfFileFormat::_ShouldSkipAnonymousReload() const
{
    return false;
}

bool EdfFileFormat::_ShouldReadAnonymousLayers() const
{
    return true;
}
// Converts the composed EdfDataParameters metadata on the payload prim into
// the file format arguments used to open (and uniquely identify) the
// dynamic layer.  The providerArgs sub-dictionary is flattened into
// "providerArgs:<key>" string entries.
void EdfFileFormat::ComposeFieldsForFileFormatArguments(const std::string& assetPath, const PcpDynamicFileFormatContext& context, FileFormatArguments* args, VtValue* contextDependencyData) const
{
    VtValue val;
    if (context.ComposeValue(EdfFileFormatTokens->Params, &val) && val.IsHolding<VtDictionary>())
    {
        // the composition engine has composed the metadata values of the prim appropriately
        // for the currently composed stage, we read these metadata values that were composed
        // and make them part of the file format arguments to load the dependent layer
        VtDictionary dict = val.UncheckedGet<VtDictionary>();
        const VtValue* dictVal = TfMapLookupPtr(dict, EdfDataParametersTokens->dataProviderId);
        if (dictVal != nullptr)
        {
            (*args)[EdfDataParametersTokens->dataProviderId] = dictVal->UncheckedGet<std::string>();
        }

        // unfortunately, FileFormatArguments is a typedef for a map<string, string>
        // which means we have to unpack the provider arguments dictionary
        // to keep the unpacking simple, we assume for now that the providerArgs
        // is itself a dictionary containing only string paris and values
        // we can remove this restriction later for simple types (using TfStringify)
        // but would need some work (recursively) for embedded lists and dictionary values
        dictVal = TfMapLookupPtr(dict, EdfDataParametersTokens->providerArgs);
        if (dictVal != nullptr)
        {
            std::string prefix = EdfDataParametersTokens->providerArgs.GetString();
            VtDictionary providerArgs = dictVal->UncheckedGet<VtDictionary>();
            for (VtDictionary::iterator it = providerArgs.begin(); it != providerArgs.end(); it++)
            {
                (*args)[prefix + ":" + it->first] = it->second.UncheckedGet<std::string>();
            }
        }
    }
}
// Determines whether a change to the EdfDataParameters metadata field
// requires the dynamic layer to be regenerated.
//
// Returns true when the data provider id appeared, disappeared, or changed,
// or when (for the same provider) the provider-specific argument dictionary
// changed; false otherwise.
//
// Fix: TfMapLookupPtr on the providerArgs key may return nullptr when the
// key is absent from either dictionary; the previous code dereferenced the
// lookup results unconditionally, crashing on metadata that omitted
// providerArgs.  Null lookups now fall back to the empty dictionary.
bool EdfFileFormat::CanFieldChangeAffectFileFormatArguments(const TfToken& field, const VtValue& oldValue, const VtValue& newValue, const VtValue& contextDependencyData) const
{
    const VtDictionary& oldDictionaryValue = oldValue.IsHolding<VtDictionary>() ?
        oldValue.UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
    const VtDictionary& newDictionaryValue = newValue.IsHolding<VtDictionary>() ?
        newValue.UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();

    // nothing to do if both metadata values are empty
    if (oldDictionaryValue.empty() && newDictionaryValue.empty())
    {
        return false;
    }

    // our layer is new if:
    // 1. there is a new provider
    // 2. there is a change to the value of the provider specific data
    const VtValue* oldProviderId =
        TfMapLookupPtr(oldDictionaryValue, EdfDataParametersTokens->dataProviderId);
    const VtValue* newProviderId =
        TfMapLookupPtr(newDictionaryValue, EdfDataParametersTokens->dataProviderId);
    if (oldProviderId == nullptr || newProviderId == nullptr)
    {
        // no change when the provider id is absent on both sides;
        // otherwise the provider appeared or disappeared
        return oldProviderId != newProviderId;
    }

    if (oldProviderId->UncheckedGet<std::string>() != newProviderId->UncheckedGet<std::string>())
    {
        // different providers!
        return true;
    }

    // same provider, but the specific provider metadata may have changed;
    // a missing or non-dictionary providerArgs entry is treated as empty
    const VtValue* oldProviderDictionaryValue =
        TfMapLookupPtr(oldDictionaryValue, EdfDataParametersTokens->providerArgs);
    const VtValue* newProviderDictionaryValue =
        TfMapLookupPtr(newDictionaryValue, EdfDataParametersTokens->providerArgs);
    const VtDictionary& oldProviderDictionary =
        (oldProviderDictionaryValue != nullptr && oldProviderDictionaryValue->IsHolding<VtDictionary>()) ?
            oldProviderDictionaryValue->UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
    const VtDictionary& newProviderDictionary =
        (newProviderDictionaryValue != nullptr && newProviderDictionaryValue->IsHolding<VtDictionary>()) ?
            newProviderDictionaryValue->UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
    return oldProviderDictionary != newProviderDictionary;
}
// these macros emit methods defined in the Pixar namespace
// but not properly scoped, so we have to use the namespace
// locally here
TF_DEFINE_PUBLIC_TOKENS(
    EdfFileFormatTokens,
    ((Id, "edfFileFormat"))
    ((Version, "1.0"))
    ((Target, "usd"))
    ((Extension, "edf"))
    ((Params, "EdfDataParameters"))
);

// Registers the EDF file format plugin with Sdf at library load time.
TF_REGISTRY_FUNCTION(TfType)
{
    SDF_DEFINE_FILE_FORMAT(EdfFileFormat, SdfFileFormat);
}
PXR_NAMESPACE_CLOSE_SCOPE | 7,937 | C++ | 35.75 | 194 | 0.754567 |
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/api.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_OMNIMETPROVIDER_API_H_
#define OMNI_OMNIMETPROVIDER_API_H_

#include "pxr/base/arch/export.h"

// Standard Pixar-style symbol visibility macros for the omniMetProvider
// library: empty in static builds; in shared builds they expand to export
// decorations when building the library itself (OMNIMETPROVIDER_EXPORTS
// defined) and import decorations for consumers.
#if defined(PXR_STATIC)
#   define OMNIMETPROVIDER_API
#   define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...)
#   define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...)
#   define OMNIMETPROVIDER_LOCAL
#else
#   if defined(OMNIMETPROVIDER_EXPORTS)
#       define OMNIMETPROVIDER_API ARCH_EXPORT
#       define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
#   else
#       define OMNIMETPROVIDER_API ARCH_IMPORT
#       define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
#   endif
#   define OMNIMETPROVIDER_LOCAL ARCH_HIDDEN
#endif

#endif
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/tf/token.h>
#include <pxr/base/vt/value.h>
#include <pxr/base/js/json.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/sdf/schema.h>
#include <pxr/usd/sdf/payload.h>
#include <pxr/usd/sdf/primSpec.h>
#include <pxr/usd/sdf/attributeSpec.h>
#include <pxr/usd/usd/tokens.h>
#include <edfDataProviderFactory.h>
#include "omniMetProvider.h"
#include <iostream>
#include <curl/curl.h>
PXR_NAMESPACE_OPEN_SCOPE
// Register OmniMetProvider with the EDF data-provider factory so it can be
// instantiated by name from layer metadata.
EDF_DEFINE_DATAPROVIDER(OmniMetProvider);

// Public argument keys callers may pass in EdfDataParameters::providerArgs
// to configure this provider (see GetDataLodLevel / IsDeferredRead / GetLod1Count).
TF_DEFINE_PUBLIC_TOKENS(
    OmniMetProviderProviderArgKeys,
    (dataLodLevel)
    (deferredRead)
    (lod1Count)
);

// Internal field-key tokens.
TF_DEFINE_PRIVATE_TOKENS(
    EdfFieldKeys,
    (EdfDataParameters)
);

// Prim type names authored for departments and objects.
TF_DEFINE_PRIVATE_TOKENS(
    OmniMetProviderTypeNames,
    (AmaDepartment)
    (AmaObject)
);

// JSON field names returned by the Met REST API; also used as the attribute
// names authored on the generated prims.
TF_DEFINE_PRIVATE_TOKENS(
    OmniMetProviderFieldKeys,
    (departmentId)
    (displayName)
    (objectID)
    (isHighlight)
    (accessionNumber)
    (accessionYear)
    (isPublicDomain)
    (primaryImage)
    (primaryImageSmall)
    (additionalImages)
    (constituents)
    (department)
    (objectName)
    (title)
    (culture)
    (period)
    (dynasty)
    (reign)
    (portfolio)
    (artistRole)
    (artistPrefix)
    (artistDisplayName)
    (artistDisplayBio)
    (artistSuffix)
    (artistAlphaSort)
    (artistNationality)
    (artistGender)
    (artistWikidata_URL)
    (artistULAN_URL)
    (objectDate)
    (objectBeginDate)
    (objectEndDate)
    (medium)
    (dimensions)
    (measurements)
    (creditLine)
    (geographyType)
    (city)
    (state)
    (county)
    (country)
    (region)
    (subregion)
    (locale)
    (locus)
    (excavation)
    (river)
    (classification)
    (rightsAndReproduction)
    (linkResource)
    (metadataDate)
    (repository)
    (objectURL)
    (objectWikidataURL)
    (isTimelineWork)
    (galleryNumber)
);

// Level of detail for the data pulled from the REST API:
// Level0 = departments only, Level1 = departments plus a capped number of
// objects per department, Level2 = everything.
enum struct DataLodLevel
{
    Level0 = 0,
    Level1 = 1,
    Level2 = 2
};

// urls used to retrieve the data
static const std::string DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/departments";
static const std::string OBJECTS_IN_DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects?departmentIds=";
static const std::string OBJECT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects/";

// Root prim path under which all department / object prims are created.
static const SdfPath DATA_ROOT_PATH("/Data");
// Constructor: forwards the provider parameters to the base class and
// initializes libcurl's global state.
// NOTE(review): curl_global_init is not thread-safe and libcurl documents it
// should be called once per process — assumes only one provider instance is
// created at a time; confirm against the factory's usage.
OmniMetProvider::OmniMetProvider(const EdfDataParameters& parameters) : IEdfDataProvider(parameters)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
}
// Destructor: releases libcurl's global state acquired in the constructor.
OmniMetProvider::~OmniMetProvider()
{
    curl_global_cleanup();
}
// Invoked on the first read of the layer. For deferred reads nothing is
// loaded here — data is fetched on demand in ReadChildren. Otherwise the
// configured LOD level decides how much of the back-end data is pulled
// up front. Always reports success.
bool OmniMetProvider::Read(std::shared_ptr<IEdfSourceData> sourceData)
{
    if (this->IsDeferredRead())
    {
        // Deferred: content is loaded lazily as prims are expanded.
        return true;
    }

    const int lodLevel = this->GetDataLodLevel();
    if (lodLevel == static_cast<int>(DataLodLevel::Level0))
    {
        // Departments only.
        this->_LoadData(false, 0, sourceData);
    }
    else if (lodLevel == static_cast<int>(DataLodLevel::Level1))
    {
        // Departments plus a capped number of objects per department.
        this->_LoadData(true, this->GetLod1Count(), sourceData);
    }
    else
    {
        // Max LOD level: load everything (objectCount == 0 means "no cap").
        this->_LoadData(true, 0, sourceData);
    }

    return true;
}
// Loads departments (always) and optionally each department's objects into
// sourceData. objectCount caps the number of objects fetched per department;
// 0 means "no cap".
void OmniMetProvider::_LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData)
{
    // Departments first: parsing them yields the (primPath, departmentId)
    // pairs the object queries key off.
    const std::string departmentData = this->_LoadDepartments();
    std::vector<std::pair<std::string, int>> departments = this->_ParseDepartments(departmentData, sourceData);

    if (!includeObjects)
    {
        return;
    }

    for (const std::pair<std::string, int>& department : departments)
    {
        std::vector<std::string> objectPayloads = this->_LoadObjects(TfStringify(department.second), objectCount);
        for (const std::string& payload : objectPayloads)
        {
            this->_ParseObject(payload, department.first, sourceData);
        }
    }
}
// Fetches the raw department JSON from the Met REST API.
//
// Returns the response body on success, or an empty string when the curl
// handle could not be created or the request failed (an error is logged).
//
// Fix: the response buffer was previously heap-allocated with new/delete,
// which leaked on any exception path and was needless overhead; a stack
// std::string is passed to libcurl by address instead.
std::string OmniMetProvider::_LoadDepartments()
{
    std::string departments;
    CURL* departmentCurl = curl_easy_init();
    if (departmentCurl != nullptr)
    {
        // Accumulation buffer filled by _CurlWriteCallback.
        std::string result;

        curl_easy_setopt(departmentCurl, CURLOPT_URL, DEPARTMENT_URL.c_str());
        curl_easy_setopt(departmentCurl, CURLOPT_HTTPGET, 1L);
        curl_easy_setopt(departmentCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback);
        curl_easy_setopt(departmentCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(&result));

        CURLcode resultCode = curl_easy_perform(departmentCurl);
        if (resultCode == CURLE_OK)
        {
            departments = std::move(result);
        }
        else
        {
            TF_CODING_ERROR("Unable to load departments from '%s'!", DEPARTMENT_URL.c_str());
        }

        // done with the request handle
        curl_easy_cleanup(departmentCurl);
    }

    return departments;
}
std::vector<int> OmniMetProvider::_ParseObjectIds(const std::string& response) const
{
std::vector<int> objectIds;
PXR_NS::JsValue jsValue = PXR_NS::JsParseString(response, nullptr);
if (!jsValue.IsNull())
{
PXR_NS::JsObject rootObject = jsValue.GetJsObject();
PXR_NS::JsObject::iterator it = rootObject.find("objectIDs");
if (it != rootObject.end())
{
PXR_NS::JsArray jsonObjectIdArray = it->second.GetJsArray();
for (auto objectIdIt = jsonObjectIdArray.begin(); objectIdIt != jsonObjectIdArray.end(); objectIdIt++)
{
objectIds.push_back((*objectIdIt).GetInt());
}
}
else
{
TF_CODING_ERROR("Unable to find 'objectIDs' array in returned data '%s'!", response.c_str());
}
}
else
{
TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", response.c_str());
}
return objectIds;
}
std::vector<std::string> OmniMetProvider::_LoadObjects(const std::string& departmentId, size_t objectCount)
{
// NOTE: this should be updated to make these requests in parallel in the case
// where we aren't doing deferred reads
// ideally we wouldn't want to initialize a new curl handle here, but since this
// call can be made in the parallel prim indexing, we can't share the easy handle
// across threads, so we take the overhead hit here
std::vector<std::string> objects;
CURL* objectCurl = curl_easy_init();
std::string url = OBJECTS_IN_DEPARTMENT_URL + departmentId;
std::string* result = new std::string();
CURLcode resultCode;
*result = "";
curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str());
curl_easy_setopt(objectCurl, CURLOPT_HTTPGET, 1L);
curl_easy_setopt(objectCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback);
curl_easy_setopt(objectCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(result));
resultCode = curl_easy_perform(objectCurl);
if (resultCode == CURLE_OK)
{
// process result
std::vector<int> objectIds = this->_ParseObjectIds(*result);
// objectCount = 0 means load all objects
// objectCount > 0 means load max that many objects
size_t counter = 0;
for (auto objectIdIterator = objectIds.begin(); objectIdIterator != objectIds.end() && (objectCount == 0 || counter < objectCount); objectIdIterator++)
{
// reset the URL and result buffer
// NOTE: this should be updated to make these requests in parallel
url = OBJECT_URL + TfStringify(*objectIdIterator);
*result = "";
curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str());
resultCode = curl_easy_perform(objectCurl);
if (resultCode == CURLE_OK)
{
objects.push_back(*result);
}
counter++;
}
}
// done with the callback data
delete result;
// done with the request handle
curl_easy_cleanup(objectCurl);
return objects;
}
// Parses the department JSON payload and creates one AmaDepartment prim per
// department under DATA_ROOT_PATH, authoring its departmentId and displayName
// attributes into sourceData.
//
// departmentJson - raw JSON response from the departments endpoint
// sourceData     - destination for the created prims / attributes
//
// Returns one (primPath, departmentId) pair per department so callers can
// subsequently fetch the objects belonging to each department. Logs a
// coding error and returns an empty vector on malformed input.
std::vector<std::pair<std::string, int>> OmniMetProvider::_ParseDepartments(const std::string& departmentJson,
    std::shared_ptr<IEdfSourceData> sourceData)
{
    std::vector<std::pair<std::string, int>> parsedDepartments;
    JsValue jsValue = JsParseString(departmentJson, nullptr);
    if (!jsValue.IsNull())
    {
        JsObject rootObject = jsValue.GetJsObject();
        JsObject::iterator it = rootObject.find("departments");
        if (it != rootObject.end())
        {
            JsArray departments = it->second.GetJsArray();
            std::string parent = DATA_ROOT_PATH.GetAsString();
            for (auto departmentIt = departments.begin(); departmentIt != departments.end(); departmentIt++)
            {
                // for each department, create a prim to represent it
                JsObject department = (*departmentIt).GetJsObject();
                int departmentId = department[OmniMetProviderFieldKeys->departmentId.GetString()].GetInt();
                std::string displayName = department[OmniMetProviderFieldKeys->displayName.GetString()].GetString();

                // create the prim; the sanitized display name doubles as the prim name
                std::string primName = TfMakeValidIdentifier(displayName);
                sourceData->CreatePrim(DATA_ROOT_PATH, primName, SdfSpecifier::SdfSpecifierDef,
                    OmniMetProviderTypeNames->AmaDepartment);

                // create the attributes for the prim
                SdfPath parentPrim = SdfPath(parent + "/" + primName);
                sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->departmentId.GetString(),
                    SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform, VtValue(departmentId));
                sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->displayName.GetString(),
                    SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(displayName));

                parsedDepartments.push_back(std::make_pair(parentPrim.GetAsString(), departmentId));
            }
        }
        else
        {
            TF_CODING_ERROR("Unable to find 'departments' array in returned data '%s'!", departmentJson.c_str());
        }
    }
    else
    {
        TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", departmentJson.c_str());
    }

    return parsedDepartments;
}
void OmniMetProvider::_ParseObject(const std::string& objectData, const std::string& parentPath,
std::shared_ptr<IEdfSourceData> sourceData)
{
// from the parent path given and the data contained in the JSON
// object retrieved from the server, we can create the full prim
JsValue jsValue = JsParseString(objectData, nullptr);
if (!jsValue.IsNull())
{
JsObject rootObject = jsValue.GetJsObject();
// the root object contains all of our properties that we now need
// to create a prim spec for the object and a set of property
// specs for it
// NOTE: this code uses the "default value" of a property spec
// to represent the authored value coming from the external system
// We don't need to do sub-composition over the data coming
// from the external system, so we ever only have a value or not
// so if HasDefaultValue is true on the property spec, it means
// there was an authored value that came from the remote system
// One optimization we could do in the layer above (EdfData) is
// to add schema acquisition and checking in the loop. This would allow us
// to create the property spec or not depending on if the value that came in
// is different from the true fallback declared in the schema
// (but we'd have to change the ask for the property to check whether
// the schema has the property rather than if the property spec exists)
std::string objectName = rootObject[OmniMetProviderFieldKeys->objectName.GetString()].GetString();
std::string primName = TfMakeValidIdentifier(objectName) +
TfStringify(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt());
// create the prim
SdfPath newPrimParentPath(parentPath);
sourceData->CreatePrim(newPrimParentPath, primName, SdfSpecifier::SdfSpecifierDef,
OmniMetProviderTypeNames->AmaObject);
// set the fact that this prim has an API schema attached to it
// usdGenSchema doesn't generate a public token for the actual
// API schema class name, so we hard code that here
SdfPath parentPrim = SdfPath(parentPath + "/" + primName);
TfTokenVector apiSchemas;
apiSchemas.push_back(TfToken("OmniMetArtistAPI"));
VtValue apiSchemasValue(apiSchemas);
sourceData->SetField(parentPrim, UsdTokens->apiSchemas, apiSchemasValue);
// create the attributes for the prim
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->objectID.GetString(),
SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isHighlight.GetString(),
SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->isHighlight.GetString()].GetBool()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionNumber.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->accessionNumber.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionYear.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->accessionYear.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isPublicDomain.GetString(),
SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->isPublicDomain.GetString()].GetBool()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImage.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->primaryImage.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImageSmall.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->primaryImageSmall.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->department.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->department.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->title.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->title.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->culture.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->culture.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->period.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->period.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->dynasty.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->dynasty.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->reign.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->reign.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->portfolio.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->portfolio.GetString()].GetString()));
// artist information complying with sample API schema
std::string namespaceFieldPrefix = "omni:met:artist:";
JsObject::const_iterator i = rootObject.find(OmniMetProviderFieldKeys->artistRole.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistRole.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistRole.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistPrefix.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistPrefix.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistPrefix.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayName.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayName.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayName.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayBio.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayBio.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayBio.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistSuffix.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistSuffix.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistSuffix.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistAlphaSort.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistAlphaSort.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistAlphaSort.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistNationality.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistNationality.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistNationality.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistGender.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistGender.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistGender.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistWikidata_URL.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistWikidata_URL.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistWikidata_URL.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistULAN_URL.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistULAN_URL.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistULAN_URL.GetString()].GetString()));
}
// note that there are quite a few additional properties that could be pulled, the above
// represents only a sample of the data that is there - if you'd like to try the rest as an
// exercise, you can enhance the schema attributes and read the remaining ones here
}
else
{
TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", objectData.c_str());
}
}
// On-demand loading entry point used when the provider is configured for
// deferred reads: loads the children of parentPath into sourceData the first
// time they are asked for.
//
// - For the data root, the departments are fetched and parsed.
// - For an AmaDepartment prim (and LOD level > 0), the department's objects
//   are fetched (capped by GetLod1Count at LOD level 1) and parsed.
//
// Returns true when running in deferred-read mode (whether or not anything
// was loaded), false otherwise (nothing to do: all data was loaded in Read).
bool OmniMetProvider::ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData)
{
    // if the parent path is the root, we need to load the departments
    // but only if we are in a deferred read scenario
    if (this->IsDeferredRead())
    {
        SdfPath parentPrimPath = SdfPath(parentPath);
        int lodLevel = this->GetDataLodLevel();
        if (parentPrimPath == DATA_ROOT_PATH)
        {
            // load the department data
            std::cout << "Loading department data..." << std::endl;
            std::string departmentData = this->_LoadDepartments();
            std::vector<std::pair<std::string, int>> departments = this->_ParseDepartments(departmentData,
                sourceData);
        }
        else
        {
            // not the root: decide based on the prim's type whether it is a
            // department whose objects need loading
            VtValue typeNameValue;
            if(sourceData->HasField(SdfPath(parentPath), SdfFieldKeys->TypeName, &typeNameValue))
            {
                if (typeNameValue.UncheckedGet<TfToken>() == OmniMetProviderTypeNames->AmaDepartment &&
                    this->GetDataLodLevel() != static_cast<int>(DataLodLevel::Level0))
                {
                    // it's a department, we need to load the objects
                    // associated with the department
                    std::string departmentIdPath = parentPath + "." + OmniMetProviderFieldKeys->departmentId.GetString();
                    VtValue departmentId;
                    if (sourceData->HasAttribute(SdfPath(departmentIdPath), &departmentId))
                    {
                        // LOD level 1 caps the number of objects fetched;
                        // objectCount == 0 means "no cap"
                        size_t objectCount = 0;
                        if (lodLevel == static_cast<int>(DataLodLevel::Level1))
                        {
                            objectCount = this->GetLod1Count();
                        }

                        // load the object data
                        std::cout << "Loading object data for " + parentPath + "..." << std::endl;
                        std::vector<std::string> objectData = this->_LoadObjects(TfStringify(departmentId.UncheckedGet<int>()), objectCount);
                        for (auto it = objectData.begin(); it != objectData.end(); it++)
                        {
                            this->_ParseObject(*it, parentPath, sourceData);
                        }
                    }
                }
            }
        }

        return true;
    }

    return false;
}
// Reports whether all back-end data is already cached in the source data.
// True exactly when the provider is NOT in deferred-read mode, since Read
// loads everything up front in that case.
bool OmniMetProvider::IsDataCached() const
{
    return !this->IsDeferredRead();
}
// Reads the "dataLodLevel" provider argument, clamped to a minimum of 0.
// Returns 0 (DataLodLevel::Level0) when the argument is absent.
int OmniMetProvider::GetDataLodLevel() const
{
    EdfDataParameters parameters = this->GetParameters();
    auto argIt = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->dataLodLevel);
    if (argIt == parameters.providerArgs.end())
    {
        return 0;
    }

    int dataLodLevel = TfUnstringify<int>(argIt->second);
    // negative levels make no sense; clamp to the lowest LOD
    return (dataLodLevel < 0) ? 0 : dataLodLevel;
}
// Reads the "lod1Count" provider argument: the per-department object cap
// used at LOD level 1. Returns 0 when the argument is absent.
//
// Although the incoming string from the parameter set might be interpretable
// as a negative integer, that doesn't make practical sense, so negative
// values are clamped to 0 and an unsigned count is returned to the caller.
//
// Fix: the value was previously parsed straight into a size_t, making the
// `< 0` clamp dead code — a negative argument wrapped around to a huge
// unsigned count instead of clamping to 0. Parse into a signed int first.
size_t OmniMetProvider::GetLod1Count() const
{
    int lod1Count = 0;
    EdfDataParameters parameters = this->GetParameters();
    std::unordered_map<std::string, std::string>::const_iterator it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->lod1Count);
    if (it != parameters.providerArgs.end())
    {
        lod1Count = TfUnstringify<int>(it->second);
        if (lod1Count < 0)
        {
            lod1Count = 0;
        }
    }

    return static_cast<size_t>(lod1Count);
}
bool OmniMetProvider::IsDeferredRead() const
{
bool deferredRead = false;
EdfDataParameters parameters = this->GetParameters();
std::unordered_map<std::string, std::string>::const_iterator it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->deferredRead);
if (it != parameters.providerArgs.end())
{
deferredRead = TfUnstringify<bool>(it->second);
}
return deferredRead;
}
// libcurl CURLOPT_WRITEFUNCTION callback: appends the received bytes to the
// std::string supplied via CURLOPT_WRITEDATA.
//
// Fix: the libcurl write-callback contract delivers size * nmemb bytes and
// expects that byte count returned on success; the original appended and
// returned only nmemb. (libcurl currently always passes size == 1, which is
// why this worked in practice, but the documented contract is size * nmemb.)
size_t OmniMetProvider::_CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp)
{
    std::string* result = reinterpret_cast<std::string*>(userp);
    size_t byteCount = size * nmemb;
    result->append(reinterpret_cast<const char* const>(data), byteCount);

    // returning fewer bytes than delivered would signal an error to libcurl
    return byteCount;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 27,507 | C++ | 41.780715 | 159 | 0.662704 |
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_
#define OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_
#include <string>
#include <vector>
#include <utility>
#include <pxr/pxr.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/layer.h>
#include <pxr/usd/sdf/schema.h>
#include <iEdfDataProvider.h>
PXR_NAMESPACE_OPEN_SCOPE
// Public argument keys accepted in EdfDataParameters::providerArgs:
//   dataLodLevel - how much data to load (0 = departments only,
//                  1 = departments + capped objects, 2 = everything)
//   deferredRead - when true, data is loaded lazily in ReadChildren
//   lod1Count    - per-department object cap used at LOD level 1
TF_DECLARE_PUBLIC_TOKENS(
    OmniMetProviderProviderArgKeys,
    (dataLodLevel)
    (deferredRead)
    (lod1Count)
);

/// \class OmniMetProvider
///
/// Defines a specific EDF back-end data provider for reading information
/// from the Metropolitan Museum of Art REST APIs and converting that
/// into prim and attribute data that can be processed by USD.
///
class OmniMetProvider : public IEdfDataProvider
{
public:
    // Forwards parameters to the base provider and initializes libcurl.
    OmniMetProvider(const EdfDataParameters& parameters);
    virtual ~OmniMetProvider();

    // Loads data into sourceData up front (unless deferredRead is set).
    virtual bool Read(std::shared_ptr<IEdfSourceData> sourceData) override;
    // Lazily loads the children of parentPath when deferredRead is set.
    virtual bool ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData) override;
    // True when all data was loaded up front (i.e. not a deferred read).
    virtual bool IsDataCached() const override;

private:
    // Accessors for the provider arguments declared above.
    int GetDataLodLevel() const;
    size_t GetLod1Count() const;
    bool IsDeferredRead() const;

    // REST fetch + JSON parse pipeline used by Read / ReadChildren.
    void _LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData);
    std::string _LoadDepartments();
    std::vector<std::string> _LoadObjects(const std::string& departmentId, size_t objectCount);
    std::vector<std::pair<std::string, int>> _ParseDepartments(const std::string& departmentJson,
        std::shared_ptr<IEdfSourceData> sourceData);
    void _ParseObject(const std::string& objectData, const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData);

    // NOTE: these methods are not technically const, since they do change internal state
    // in the edfData object's layer data. This is ok, because that object is a cache
    // https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#es50-dont-cast-away-const
    // the mutuable cache state is allowed to change internally and still keep the semantics
    // of the object not changing from the outside
    // NOTE(review): these const overloads do not appear to have definitions in
    // the corresponding .cpp — possibly a legacy API surface; confirm before use.
    void _LoadDepartments(bool includeObjects) const;
    void _LoadObjects(const std::string& departmentId, const std::string& parentPath) const;
    bool _IsDepartmentDataCached() const;
    bool _IsObjectDataCached(const std::string& parentPath) const;
    void _ParseDepartments(const std::string& response) const;
    std::vector<int> _ParseObjectIds(const std::string& response) const;
    void _ParseObject(const std::string& parentPath, const std::string& response) const;

    // libcurl write callback; userp must point at the std::string receiving
    // the response body.
    static size_t _CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp);
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 3,321 | C | 37.627907 | 128 | 0.747666 |
NVIDIA-Omniverse/kit-osc/README.md | # OSC Omniverse Kit Extension [omni.osc]
Omniverse Kit extension for sending and receiving OSC (Open Sound Control) messages.
![demo.gif](/docs/images/demo.gif)
*The OSC control surface app running on the iPad is [TouchOSC](https://hexler.net/touchosc).*
# Getting Started
Open the Community tab under Extensions window (`Window > Extensions`), search for `OSC`, and install and enable the `omni.osc` extension.
![extension-install](/docs/images/extension-install.png)
## Running the server
After installing and enabling the extension, you should see the following window.
![server-ui-window](/docs/images/server-ui-window.png)
Enter the private IP address of the computer running your Kit application and the desired port, then click `Start`. If you are prompted to configure your Windows Firewall, ensure that the Kit application is allowed to communicate with other devices on the private network.
![windows-firewall](/docs/images/osc-start-windows-security-alert.png)
You can find the private IP address of your computer by running `ipconfig` in the Windows terminal.
![ipconfig](/docs/images/ipconfig.png)
If you run the server on `localhost`, that means the server can only receive messages from OSC clients running on the same machine. If you want to receive messages from OSC clients running on other devices on the same network, you must run the server on an IP address that is visible to those devices.
Once the server is running, confirm that it can successfully receive messages by inspecting the verbose console logs. It might be helpful to filter only the logs that originate from `omni.osc`.
![console-logs](/docs/images/console-logs.png)
## Receiving messages with Python
Below is a python snippet that demonstrates how to handle OSC messages received by the server. It assumes that the OSC server configured above is running. You can paste and run the below snippet directly into the Omniverse Script Editor for testing.
```python
import carb
import carb.events
import omni.osc
def on_event(event: carb.events.IEvent) -> None:
addr, args = omni.osc.osc_message_from_carb_event(event)
carb.log_info(f"Received OSC message: [{addr}, {args}]")
sub = omni.osc.subscribe_to_osc_event_stream(on_event)
```
## Receiving messages with ActionGraph
Search for `OSC` in the Action Graph nodes list and add the `On OSC Message` node to your graph. The node takes a single input,
the OSC address path that this node will handle. This input can be a valid regular expression. Note that this input field does *not* support
OSC pattern matching expressions. The node outputs an OmniGraph bundle with two attributes named `address` and `arguments` which you
can access by using the `Extract Attribute` node.
![og-receive](/docs/images/og-receive.png)
You can find example USD stages that demonstrate how to configure an ActionGraph using this extension at [exts/omni.osc/data/examples](/exts/omni.osc/data/examples).
## Sending messages from Python
Since `omni.osc` depends on [python-osc](https://pypi.org/project/python-osc/), you can import this module directly in
your own Python code to send OSC messages. Please see the [documentation](https://python-osc.readthedocs.io/en/latest/) for additional
information and support.
```python
import random
import time
from pythonosc import udp_client
client = udp_client.SimpleUDPClient("127.0.0.1", 3334)
client.send_message("/scale", [random.random(), random.random(), random.random()])
```
You can paste and run the above snippet directly into the Omniverse Script Editor for testing.
## Sending messages from ActionGraph
This is not currently implemented.
## Limitations & Known Issues
- OSC Bundles are currently not supported.
- The OmniGraph `On OSC Message` node can only handle OSC messages containing lists of floating-point arguments.
# Help
The below sections should help you diagnose any potential issues you may encounter while working with `omni.osc` extension.
## Unable to receive messages
1. First, enable verbose logs in the console (filter by the `omni.osc` extension). The server will log any messages received.
2. Confirm that the computer running the Kit application and the device sending the OSC messages are on the same network.
3. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall. Note that
you may have multiple instances of kit.exe on this list. When in doubt, ensure that all of them have the appropriate permission.
![windows-firewall](/docs/images/windows-firewall.png)
4. Confirm that the Windows Defender Firewall allows incoming UDP traffic to the port in use.
5. Confirm that the device sending the OSC messages is sending the messages via UDP to the correct IP address and port.
6. Use a tool such as [wireshark](https://www.wireshark.org/) to confirm that the computer running the Kit application is receiving UDP traffic from the device.
## Unable to send messages
1. Confirm that the computer running the Kit application and the device receiving the OSC messages are on the same network.
2. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall.
3. Confirm that the device receiving the OSC messages is able to receive incoming UDP traffic at the port in use.
# Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
# License
- The code in this repository is licensed under the Apache License 2.0. See [LICENSE](/LICENSE).
- python-osc is licensed under the Unlicense. See [exts/omni.osc/vendor/LICENSE-python-osc](/exts/omni.osc/vendor/LICENSE-python-osc).
# Resources
- [https://opensoundcontrol.stanford.edu/spec-1_0.html](https://opensoundcontrol.stanford.edu/spec-1_0.html)
- [https://en.wikipedia.org/wiki/Open_Sound_Control](https://en.wikipedia.org/wiki/Open_Sound_Control)
- [https://python-osc.readthedocs.io/en/latest/](https://python-osc.readthedocs.io/en/latest/)
| 5,998 | Markdown | 46.992 | 301 | 0.779593 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "0.3.1"
# The title and description fields are primarily for displaying extension info in UI
title = "OSC (Open Sound Control)"
description="Send and receive OSC (Open Sound Control) messages"
authors = ["NVIDIA"]
repository = "https://github.com/NVIDIA-Omniverse/kit-osc"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
icon = "data/icon.png"
preview_image = "data/preview.png"
# One of categories for UI.
category = "Other"
# Keywords for the extension
keywords = ["kit", "osc"]
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.pipapi" = {}
"omni.graph" = {}
"omni.graph.bundle.action" = {}
# Main python module this extension provides; it will be publicly available as "import omni.osc".
[[python.module]]
name = "omni.osc"
[python.pipapi]
archiveDirs = ["vendor"]
[settings.exts."omni.osc"]
address = "localhost"
port = 3334
[[test]]
dependencies = ["omni.graph", "omni.kit.test"]
| 983 | TOML | 22.999999 | 102 | 0.703967 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/extension.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Any, List
import carb
import carb.events
import carb.profiler
import omni.ext
import omni.kit.app
from pythonosc.dispatcher import Dispatcher
from .core import carb_event_payload_from_osc_message, push_to_osc_event_stream
from .menu import OscMenu
from .server import DaemonOSCUDPServer
from .window import OscWindow
class OmniOscExt(omni.ext.IExt):
    """
    Kit extension entry point for omni.osc.

    Owns three pieces of state created at startup and torn down at shutdown:
    - ``self.server``: a :class:`DaemonOSCUDPServer` that receives OSC UDP
      messages and forwards them to the carbonite OSC event stream.
    - ``self.window``: the main :class:`OscWindow` UI for starting/stopping
      the server.
    - ``self.menu``: the editor menu entry that toggles window visibility.
    """

    def on_startup(self, ext_id):
        """
        Called by Kit when the extension is enabled.

        Args:
            ext_id: The unique extension id assigned by the extension manager.
        """

        def on_start(host: str, port: int) -> bool:
            """Window callback: start listening on host:port. Returns success."""
            return self.server.start(host, port)

        def on_stop() -> bool:
            """Window callback: stop the running server. Returns success."""
            return self.server.stop()

        def toggle_window_visible(_arg0, _arg1) -> None:
            """
            Toggle the window visibility from the editor menu item
            """
            self.window.visible = not self.window.visible

        self.server = OmniOscExt.create_server()
        # The main UI window, pre-populated with the defaults declared in
        # extension.toml ([settings.exts."omni.osc"]).
        settings = carb.settings.get_settings()
        default_addr = settings.get("exts/omni.osc/address")
        default_port = settings.get("exts/omni.osc/port")
        self.window = OscWindow(
            on_start=on_start, on_stop=on_stop, default_addr=default_addr, default_port=default_port
        )
        # The editor menu entry that toggles the window visibility
        self.menu = OscMenu(on_click=toggle_window_visible)
        # Toggle the editor menu entry when the user closes the window
        self.window.set_visibility_changed_fn(lambda visible: self.menu.set_item_value(visible))

    def on_shutdown(self):
        """Called by Kit when the extension is disabled; releases UI and stops the server."""
        self.window = None
        self.menu = None
        if self.server is not None:
            # Stop the daemon server before dropping the reference so the
            # UDP socket is released deterministically.
            self.server.stop()
            self.server = None

    @staticmethod
    def create_server() -> DaemonOSCUDPServer:
        """
        Create a server that routes all OSC messages to a carbonite event stream.

        Declared as a @staticmethod: it reads no instance state and is invoked
        as ``OmniOscExt.create_server()`` above; the decorator also makes
        instance access (``self.create_server()``) behave correctly.

        Returns:
            A DaemonOSCUDPServer whose dispatcher forwards every message
            (default handler) to the OSC event stream.
        """

        @carb.profiler.profile
        def on_osc_msg(addr: str, *args: List[Any]) -> None:
            """
            OSC message handler: convert the message to a carb event payload
            and push it onto the OSC event stream.
            """
            carb.log_verbose(f"OSC message: [{addr}, {args}]")
            payload = carb_event_payload_from_osc_message(addr, args)
            push_to_osc_event_stream(payload)

        # Route every address pattern through the default handler.
        dispatcher = Dispatcher()
        dispatcher.set_default_handler(on_osc_msg)
        return DaemonOSCUDPServer(dispatcher)
| 2,714 | Python | 34.723684 | 100 | 0.658438 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.kit.pipapi
# python-osc:
# - SWIPAT request: http://nvbugs/3684871
# - A copy of the source is forked to https://github.com/NVIDIA-Omniverse/python-osc
# - The dependency vendored and installed from exts/omni.osc/vendor/python_osc-1.8.0-py3-none-any.whl
omni.kit.pipapi.install(
package="python-osc", module="pythonosc", use_online_index=False, ignore_cache=True, ignore_import_check=False
)
from pythonosc import * # noqa: F401
from .core import * # noqa: F401,F403
from .extension import * # noqa: F401,F403
from .server import * # noqa: F401,F403
# NOTE(jshrake): omni.graph is an optional dependency so handle the case
# that the below import fails
try:
from .ogn import *
except Exception as e:
print(f"omni.osc failed to import OGN due to {e}")
pass
| 1,219 | Python | 37.124999 | 114 | 0.754717 |