file_path
stringlengths 21
202
| content
stringlengths 12
1.02M
| size
int64 12
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 3.33
100
| max_line_length
int64 10
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
mnaskret/omni-tetGen/mnresearch/tetgen/ogn/tests/TestPBDBasicGravity.py | import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
import os
import carb
class TestOgn(ogts.test_case_class(use_schema_prims=True, allow_implicit_graph=False)):
    """Generated-style tests for the mnresearch.tetgen.PBDBasicGravity OmniGraph node."""

    async def test_import(self):
        """The node's database module must be importable and expose its database class."""
        import mnresearch.tetgen.ogn.PBDBasicGravityDatabase
        self.assertTrue(hasattr(mnresearch.tetgen.ogn.PBDBasicGravityDatabase, "PBDBasicGravityDatabase"))

    async def test_usda(self):
        """Load the template USDA file and verify the node's default input values."""
        test_file_name = "PBDBasicGravityTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_mnresearch_tetgen_PBDBasicGravity")
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        # (attribute name, expected default) pairs.  Table-driven to avoid
        # repeating the same three-line existence/value check 14 times; the
        # expected values are exactly those from the original generated test.
        expected_inputs = [
            ("inputs:edge", []),
            ("inputs:edgesRestLengths", []),
            ("inputs:elem", []),
            ("inputs:gravity", [0.0, -9.8, 0.0]),
            ("inputs:ground", -100.0),
            ("inputs:inverseMasses", []),
            ("inputs:ks_distance", 1.0),
            ("inputs:ks_volume", 1.0),
            ("inputs:num_substeps", 8),
            ("inputs:points", []),
            ("inputs:sim_constraints", 1),
            ("inputs:tetrahedronsRestVolumes", []),
            ("inputs:velocities", []),
            ("inputs:velocity_dampening", 0.1),
        ]
        for attr_name, expected in expected_inputs:
            self.assertTrue(test_node.get_attribute_exists(attr_name))
            input_attr = test_node.get_attribute(attr_name)
            actual_input = og.Controller.get(input_attr)
            ogts.verify_values(
                expected,
                actual_input,
                f"mnresearch.tetgen.PBDBasicGravity USD load test - {attr_name} attribute value error",
            )
| 5,839 | Python | 60.473684 | 150 | 0.71365 |
mnaskret/omni-tetGen/mnresearch/tetgen/nodes/__init__.py |
"""
Dynamically import every file in a directory tree that looks like a Python Ogn Node.
This includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.
"""
import omni.graph.core as og
og.register_ogn_nodes(__file__, "mnresearch.tetgen")
| 290 | Python | 35.374996 | 113 | 0.768966 |
Kim2091/RTXRemixTools/README.md | # RTXRemixTools
These are some tools I've made that are intended for use with Nvidia's RTX Remix. Right now I have 3:
* **MagicUSDA** - Allows you to generate .usda files based on your gameReadyAssets folder
* **LightAdjuster** - A simple script that allows you to adjust light intensity and color temperature in a specified .usda file
* **RemixMeshConvert** - This script will convert meshes to be (more) compatible with Remix
These should hopefully help with setting up mods for Remix quickly and easily.
| 511 | Markdown | 45.54545 | 127 | 0.772994 |
Kim2091/RTXRemixTools/LightAdjuster/LightAdjuster.py | import argparse
def adjust_value(line, value_name, percentage, log_changes, i):
    """Scale the number on a `float <value_name> = X` line by `percentage`.

    Returns a (line, changed) pair: the possibly-rewritten line, and a flag
    telling the caller whether a substitution was made.  When `log_changes`
    is truthy the before/after text is printed and appended to changes.log.
    `i` is the zero-based line index, used only for the log message.
    """
    needle = f'float {value_name} ='
    if needle not in line:
        # Not a line we manage; hand it back untouched.
        return line, False

    pieces = line.split('=')
    scaled = float(pieces[1].strip()) * percentage
    updated_line = f'{pieces[0]}= {scaled}\n'

    if log_changes:
        entry = f'Line {i + 1}: {line.strip()} -> {updated_line.strip()}'
        print(entry)
        with open('changes.log', 'a') as log:
            log.write(entry + '\n')

    return updated_line, True
def adjust_file(file_path, start_line=1, log_changes=False, adjust_intensity=False, adjust_color_temperature=False, percentage=None):
    """Rewrite `file_path` in place, scaling intensity and/or colorTemperature values.

    Lines before `start_line` (1-based) are copied through unchanged.  Each
    eligible line is handed to `adjust_value` once per enabled value type,
    and a summary of how many lines changed is printed at the end.
    """
    with open(file_path, 'r') as source:
        original_lines = source.readlines()

    lines_changed = 0
    with open(file_path, 'w') as target:
        for index, current in enumerate(original_lines):
            if index + 1 >= start_line:
                if adjust_intensity:
                    current, was_changed = adjust_value(current, 'intensity', percentage, log_changes, index)
                    if was_changed:
                        lines_changed += 1
                if adjust_color_temperature:
                    current, was_changed = adjust_value(current, 'colorTemperature', percentage, log_changes, index)
                    if was_changed:
                        lines_changed += 1
            target.write(current)

    print(f'Completed! {lines_changed} lines changed.')
if __name__ == '__main__':
    # Command-line entry point: parse the options and apply the adjustment
    # to the target file in place.  -p is mandatory; -ai/-act choose which
    # value(s) get scaled.
    parser = argparse.ArgumentParser(description='Adjust the intensity and/or color temperature values in a file.')
    parser.add_argument('file_path', type=str, help='The path to the file to modify.')
    parser.add_argument('-s', '--start-line', type=int, default=1, help='The line number to start modifying at.')
    parser.add_argument('-l', '--log', action='store_true', help='Whether to print a log of the changed lines.')
    parser.add_argument('-ai', '--adjust-intensity', action='store_true', help='Whether to adjust the intensity value.')
    parser.add_argument('-act', '--adjust-color-temperature', action='store_true', help='Whether to adjust the color temperature value.')
    parser.add_argument('-p', '--percentage', type=float, required=True, help='The percentage to adjust the value by.')
    args = parser.parse_args()
    adjust_file(args.file_path, args.start_line, args.log, args.adjust_intensity, args.adjust_color_temperature, args.percentage)
| 2,440 | Python | 52.065216 | 137 | 0.609836 |
Kim2091/RTXRemixTools/LightAdjuster/README.md | # **Remix Light Adjuster**
*Written with the assistance of Bing*
This script adjusts the intensity and/or color temperature values in a file.
$\color{#f7d26a}{\textsf{Please back up your usda files before running!}}$
## Usage
To use this script, run the following command:
`python LightAdjuster.py file_path`
where `file_path` is the path to the .usda file to modify.
There are several additional options that can be used with this script:
* `-s` or `--start-line` - This option allows you to specify the line number to start modifying at. The default value is 1.
* `-l` or `--log` - This option enables logging of the changed lines. If this option is used, a log of the changed lines will be printed to the console and written to a file named `changes.log`.
* `-p` or `--percentage` - This option specifies the percentage to adjust the value by. This option is required.
* `-ai` or `--adjust-intensity` - This option enables adjustment of the intensity value using `-p`.
* `-act` or `--adjust-color-temperature` - This option enables adjustment of the color temperature value using `-p`.
For example, to adjust the intensity value in a file named `data.txt`, starting at line 5, and logging the changes, you would run the following command:
`python LightAdjuster.py data.txt -s 5 -l -ai -p 0.5`
This would adjust the intensity value in all lines containing `float intensity =`, starting at line 5, by multiplying it by 0.5. A log of the changed lines would be printed to the console and written to a file named `changes.log`.
## Description
This script reads the specified file and modifies lines that contain either `float intensity =` or `float colorTemperature =`, depending on which value is being adjusted. The value is multiplied by the specified percentage and the line is updated with the new value. If logging is enabled, a log of the changed lines is printed to the console and written to a file named `changes.log`.
After all lines have been processed, the script prints a message indicating how many lines were changed.
| 2,047 | Markdown | 55.888887 | 385 | 0.755252 |
Kim2091/RTXRemixTools/MagicUSDA/README.md | # Remix USDA Generator
*Written with the assistance of Bing and ChatGPT*
$\color{#f7d26a}{\textsf{Please back up your usda files to a separate folder before running!}}$
This is a script to generate `.usda` files from your gameReadyAssets folder. It detects any of these map types in your folder:
- emissive
- normal
- metallic
- rough
## Usage
How to use this script:
`python MagicUSDA.py -d path\to\gameReadyAssets`
There are some additional functions:
* `-o` - Change the output usda file names.
* `-m` - Split the output USDA files into separate entries for each map type (e.g. mod_emissive.usda, mod_metallic.usda). Works with `-o` to change the base file name.
* `-a` - Add sublayers made with `-m` to the mod.usda file. Not compatible with custom files specified by `-o`, will only modify mod.usda. Works with `-m` and `-o`.
* `-g` - Toggle generating hashes for file names before the suffix. Useful for files with generic names like test.dds. Diffuse textures must be identical to Remix dumps.
* `-s` - Change between the AperturePBR_Opacity and AperturePBR_Translucent material shader types. Using this, you can generate separate .usda files for normal or translucent objects easily
* `-r` _**Currently broken**_ - Specify a separate folder to use as a reference for generating diffuse texture hashes. Searches for files in the reference directory based on file names from the base directory. If not provided, uses the main directory to generate hashes. Useful with folders like captures or game texture rips.
The `.usda` files generated by this script serve to replace textures in your Remix games, allowing you to swap out textures and utilize additional map types to enhance the game's visuals.
This script is intended to be used with original diffuse textures, which are required for it to function correctly. It generates a `mod.usda` file for use in your game through Remix. It was designed with [chaiNNer](https://chainner.app/) in mind, however you can use this with any textures you've created. Be aware that this script will overwrite any pre-existing `mod.usda` files in your directory!
| 2,113 | Markdown | 74.499997 | 399 | 0.769049 |
Kim2091/RTXRemixTools/MagicUSDA/MagicUSDA.py | import os
import argparse
import xxhash
from pxr import Usd, UsdGeom, UsdShade, Sdf
suffixes = ["_normal", "_emissive", "_metallic", "_rough"]
def generate_hashes(file_path) -> str:
    """Hash the top mip level of a DDS texture, returning an upper-case hex digest.

    Reads the 128-byte DDS header to work out how many bytes the first mip
    occupies, then hashes exactly those bytes with xxh3_64.  The result is
    the texture hash format used for Remix material replacement names.
    Thanks @BlueAmulet for the header math!
    """
    # FIX: the original opened the same file twice (once for the header,
    # once for the payload).  A single open suffices: after reading the
    # 128-byte header, the file position is already at the start of mip 0.
    with open(file_path, "rb") as file:
        data = file.read(128)

        # DDS header layout: height at offset 12, width at 16; the pixel
        # format block holds flags at 80, FourCC at 84 and bit count at 88.
        dwHeight = int.from_bytes(data[12:16], "little")
        dwWidth = int.from_bytes(data[16:20], "little")
        pfFlags = int.from_bytes(data[80:84], "little")
        pfFourCC = data[84:88]
        bitCount = int.from_bytes(data[88:92], "little")

        mipsize = dwWidth * dwHeight
        if pfFlags & 0x4:  # DDPF_FOURCC (block-compressed format)
            if pfFourCC == b"DXT1":  # DXT1 is 4bpp, i.e. half a byte per pixel
                mipsize //= 2
        elif pfFlags & 0x20242:  # DDPF_ALPHA | DDPF_RGB | DDPF_YUV | DDPF_LUMINANCE
            mipsize = mipsize * bitCount // 8

        # Read only the portion of the file that the hash covers.
        data = file.read(mipsize)

    hash_value = xxhash.xxh3_64(data).hexdigest()
    return hash_value.upper()
def write_usda_file(args, file_list, suffix=None) -> [list, list]:
    """Write one material-replacement .usda file for the textures in `file_list`.

    When `suffix` is given (e.g. "_normal"), only textures of that map type are
    bound and the output file name gains the suffix; with `suffix=None` a single
    file binding every detected map type is produced.

    Returns a two-element list [modified_files, created_files].
    NOTE(review): the early return below uses the opposite order,
    [created_files, modified_files]; both lists are still empty at that point
    so callers see no difference, but the inconsistency should be fixed.
    """
    created_files = []
    modified_files = []
    game_ready_assets_path = os.path.join(args.directory)
    # Check if there are any texture files with the specified suffix
    if suffix:
        has_suffix_files = False
        for file_name in file_list:
            if file_name.endswith(f"{suffix}.dds"):
                has_suffix_files = True
                break
        if not has_suffix_files:
            # return a blank set
            return [created_files, modified_files]
    usda_file_name = f'{args.output}{suffix if suffix else ""}.usda'
    usda_file_path = os.path.join(game_ready_assets_path, usda_file_name)
    # Record whether we are about to overwrite an existing file or create one.
    if os.path.exists(usda_file_path):
        modified_files.append(usda_file_path)
    else:
        created_files.append(usda_file_path)
    # Map: material key -> (relative diffuse texture path, hash used in prim names).
    targets = {}
    reference_directory = args.reference_directory if args.reference_directory else args.directory
    for file_name in file_list:
        if file_name.endswith(".dds"):
            # Extract only the file name from the absolute path
            name = os.path.basename(file_name)
            name, ext = os.path.splitext(name)
            # Only diffuse/albedo textures (or suffix-less names) seed a material entry.
            if "_" not in name or name.endswith("_diffuse") or name.endswith("_albedo"):
                # Check if the generate_hashes argument is specified
                if args.generate_hashes:
                    key = name.split("_")[0]  # Use the prefix of the diffuse file name as the key
                    hash_value = generate_hashes(os.path.join(reference_directory, file_name))  # Generate hash for the diffuse file
                else:
                    key = os.path.basename(name)
                    hash_value = key  # Use the original name as the hash value
                # Check if the key contains a hash or ends with _diffuse or _albedo
                if not (key.isupper() and len(key) == 16) and not (key.endswith("_diffuse") or key.endswith("_albedo")):
                    continue
                # Remove the _diffuse or _albedo suffix from the key and hash_value
                key = key.replace("_diffuse", "").replace("_albedo", "")
                hash_value = hash_value.replace("_diffuse", "").replace("_albedo", "")
                # Get the relative path from the game ready assets path to the texture file
                rel_file_path = os.path.relpath(file_name, args.directory)
                targets[key] = (rel_file_path, hash_value)
    # Create a new stage
    stage = Usd.Stage.CreateNew(usda_file_path)
    # Modify the existing RootNode prim
    root_node_prim = stage.OverridePrim("/RootNode")
    # Add a Looks scope as a child of the RootNode prim
    looks_scope = UsdGeom.Scope.Define(stage, "/RootNode/Looks")
    added_targets = set()
    for value, (rel_file_path, hash_value) in targets.items():
        # Check if there is a corresponding texture file for the specified suffix
        if suffix and not any(
            file_name.endswith(f"{value}{suffix}.dds") for file_name in file_list
        ): continue
        if value in added_targets:
            continue
        else:
            added_targets.add(value)
        print(f"Adding texture {rel_file_path} with hash: {hash_value}")
        # Add a material prim as a child of the Looks scope
        material_prim = UsdShade.Material.Define(
            stage, f"/RootNode/Looks/mat_{hash_value.upper()}"
        )
        material_prim.GetPrim().GetReferences().SetReferences([])
        # Set the shader attributes
        shader_prim = UsdShade.Shader.Define(
            stage, f"/RootNode/Looks/mat_{hash_value.upper()}/Shader"
        )
        shader_prim.GetPrim().CreateAttribute("info:mdl:sourceAsset", Sdf.ValueTypeNames.Asset).Set(
            f"{args.shader_type}.mdl"
        )
        shader_prim.GetPrim().CreateAttribute("info:implementationSource", Sdf.ValueTypeNames.Token).Set(
            "sourceAsset"
        )
        shader_prim.GetPrim().CreateAttribute("info:mdl:sourceAsset:subIdentifier", Sdf.ValueTypeNames.Token).Set(
            f"{args.shader_type}"
        )
        shader_output = shader_prim.CreateOutput("output", Sdf.ValueTypeNames.Token)
        if not suffix or suffix == "_diffuse" or suffix == "_albedo":
            diffuse_texture = shader_prim.CreateInput(
                "diffuse_texture", Sdf.ValueTypeNames.Asset
            )
            # Use the dynamically generated relative path for the diffuse texture
            diffuse_texture.Set(f".\{rel_file_path}")
        # Process each type of texture
        # NOTE(review): the os.path.dirname(file_name) calls below reuse the
        # `file_name` variable left over from the earlier scanning loop (the
        # last entry in file_list), not the current material's diffuse path —
        # confirm whether rel_file_path's directory was intended instead.
        if not suffix or suffix == "_emissive":
            emissive_file_name = f"{value}_emissive.dds"
            # print(f"Emissive File Name: {emissive_file_name in file_list}")
            # print(file_list)
            if any(file_path.endswith(emissive_file_name) for file_path in file_list):
                emissive_mask_texture = shader_prim.CreateInput(
                    "emissive_mask_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the emissive texture
                emissive_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), emissive_file_name), args.directory)
                emissive_mask_texture.Set(f".\{emissive_rel_file_path}")
                enable_emission = shader_prim.CreateInput(
                    "enable_emission", Sdf.ValueTypeNames.Bool
                )
                enable_emission.Set(True)
                emissive_intensity = shader_prim.CreateInput(
                    "emissive_intensity", Sdf.ValueTypeNames.Float
                )
                emissive_intensity.Set(5)
        if not suffix or suffix == "_metallic":
            metallic_file_name = f"{value}_metallic.dds"
            if any(file_path.endswith(metallic_file_name) for file_path in file_list):
                metallic_texture = shader_prim.CreateInput(
                    "metallic_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the metallic texture
                metallic_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), metallic_file_name), args.directory)
                metallic_texture.Set(f".\{metallic_rel_file_path}")
        if not suffix or suffix == "_normal":
            normal_file_name = f"{value}_normal.dds"
            if any(file_path.endswith(normal_file_name) for file_path in file_list):
                normalmap_texture = shader_prim.CreateInput(
                    "normal_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the normal texture
                normal_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), normal_file_name), args.directory)
                normalmap_texture.Set(f".\{normal_rel_file_path}")
        if not suffix or suffix == "_rough":
            roughness_file_name = f"{value}_rough.dds"
            if any(file_path.endswith(roughness_file_name) for file_path in file_list):
                reflectionroughness_texture = shader_prim.CreateInput(
                    "reflectionroughness_texture", Sdf.ValueTypeNames.Asset
                )
                # Use the dynamically generated relative path for the roughness texture
                roughness_rel_file_path = os.path.relpath(os.path.join(os.path.dirname(file_name), roughness_file_name), args.directory)
                reflectionroughness_texture.Set(f".\{roughness_rel_file_path}")
        # Connect shader output to material inputs
        material_prim.CreateInput(
            "mdl:displacement", Sdf.ValueTypeNames.Token
        ).ConnectToSource(shader_output)
        material_prim.CreateInput(
            "mdl:surface", Sdf.ValueTypeNames.Token
        ).ConnectToSource(shader_output)
        material_prim.CreateInput(
            "mdl:volume", Sdf.ValueTypeNames.Token
        ).ConnectToSource(shader_output)
    # Save the stage
    stage.Save()
    return [modified_files, created_files]
def add_sublayers(args, file_list) -> list:
    """Reference any generated per-suffix USDA layers from mod.usda.

    Appends "./<output><suffix>.usda" entries to mod.usda's subLayerPaths for
    each suffix layer that exists in `file_list` and is not already referenced.
    Returns the list of modified files (mod.usda, when it exists).
    """
    modified_files = []
    game_ready_assets_path = os.path.join(args.directory)
    mod_file_path = os.path.join(game_ready_assets_path, "mod.usda")
    if os.path.exists(mod_file_path):
        modified_files.append(mod_file_path)
        # Open the existing stage and inspect its current sublayer list.
        stage = Usd.Stage.Open(mod_file_path)
        root_layer = stage.GetRootLayer()
        current_sublayers = list(root_layer.subLayerPaths)
        # Basenames already referenced, to avoid duplicate entries.
        already_referenced = {
            os.path.basename(sublayer_path) for sublayer_path in current_sublayers
        }

        additions = []
        for layer_suffix in suffixes:
            layer_file = f"{args.output}{layer_suffix}.usda"
            if layer_file in already_referenced:
                continue
            # Only reference layers that were actually generated on disk.
            if any(os.path.basename(path) == layer_file for path in file_list):
                additions.append(f"./{layer_file}")

        root_layer.subLayerPaths = current_sublayers + additions
        stage.Save()
    return modified_files
if __name__ == "__main__":
# ARGUMENT BLOCK
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", required=True, help="Path to directory")
parser.add_argument("-o", "--output", default="mod", help="Output file name")
parser.add_argument("-g", "--generate-hashes", action="store_true", help="Generates hashes for file names before the suffix")
parser.add_argument("-m", "--multiple-files", action="store_true", help="Save multiple .usda files, one for each suffix type (except for diffuse)")
parser.add_argument("-a", "--add-sublayers", action="store_true", help="Add sublayers made with -m to the mod.usda file. This argument only modifies the mod.usda file and does not affect any custom USDA file specified by the -o argument.")
parser.add_argument("-s", "--shader-type", default="AperturePBR_Opacity", choices=["AperturePBR_Opacity", "AperturePBR_Translucent"], help="Shader type")
parser.add_argument("-r", "--reference-directory", help="Path to reference directory for diffuse texture hashes")
args = parser.parse_args()
# Check target processing directory before use
if not os.path.isdir(args.directory):
raise FileNotFoundError("Specified processing directory (-d) is invalid")
# Recursively scan folders
file_list = []
for root, dirs, files in os.walk(args.directory):
for file in files:
file_list.append(os.path.join(root, file))
created_files = []
modified_files = []
# Process sublayer additions
print(f"Add Sublayers: {args.add_sublayers}")
if args.add_sublayers:
modified_files.extend(add_sublayers(args, file_list))
# Generate unique USDA files per suffix type (except diffuse)
if args.multiple_files:
for suffix in suffixes:
m, c = write_usda_file(args, file_list, suffix)
modified_files.extend(m), created_files.extend(c)
else: # Generate a single USDA file for all suffixes
m, c = write_usda_file(args, file_list)
modified_files.extend(m), created_files.extend(c)
# Complete
print("Finished!")
print("Created files:")
for file in created_files:
print(f" - {file}")
print("Modified files:")
for file in modified_files:
print(f" - {file}")
| 12,785 | Python | 43.242214 | 243 | 0.615722 |
Kim2091/RTXRemixTools/RemixMeshConvert/RemixMeshConvert.py | import argparse
import logging
import os
import shutil
import sys
from pxr import Usd, UsdGeom, Gf, Sdf
# Maps legacy/DCC-specific UV primvar names to the Remix-expected `st`
# primvar name (and its value type), applied when rewriting mesh primvars.
ALIASES = {
    "primvars:UVMap": ("primvars:st", Sdf.ValueTypeNames.Float2Array),
    "primvars:UVChannel_1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    "primvars:map1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    # Add more aliases here
}
def convert_face_varying_to_vertex_interpolation(usd_file_path):
    """Convert every mesh in the USD file from faceVarying to vertex interpolation.

    Points are duplicated so each face-vertex index references its own point,
    indices are rewritten to the sequence 0..N-1, normals and faceVarying
    primvars are switched to vertex interpolation, and aliased UV primvars are
    renamed per ALIASES. Returns the opened Usd.Stage (caller saves it).
    """
    stage = Usd.Stage.Open(usd_file_path)
    mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]
    for prim in mesh_prims:
        mesh = UsdGeom.Mesh(prim)
        indices = prim.GetAttribute("faceVertexIndices")
        points = prim.GetAttribute("points")
        if not indices or not points:
            continue  # Skip if the required attributes are missing
        # Expand points: one point per face-vertex, in index order.
        points_arr = points.Get()
        modified_points = [points_arr[i] for i in indices.Get()]
        points.Set(modified_points)
        # After expansion, the index buffer is simply sequential.
        indices.Set([i for i in range(len(indices.Get()))])
        mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
        primvar_api = UsdGeom.PrimvarsAPI(prim)
        for var in primvar_api.GetPrimvars():
            if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:
                var.SetInterpolation(UsdGeom.Tokens.vertex)
            # Replace aliases with "float2[] primvars:st"
            if var.GetName() in ALIASES:
                new_name, new_type_name = ALIASES[var.GetName()]
                new_var = primvar_api.GetPrimvar(new_name)
                if new_var:
                    new_var.Set(var.Get())
                else:
                    new_var = primvar_api.CreatePrimvar(new_name, new_type_name)
                    new_var.Set(var.Get())
                new_var.SetInterpolation(UsdGeom.Tokens.vertex)  # Set interpolation to vertex
                primvar_api.RemovePrimvar(var.GetBaseName())
    return stage
def process_folder(input_folder, output_folder, output_extension=None):
    """Convert every regular file in `input_folder` into `output_folder`.

    Each file is copied to the output folder (renamed to `output_extension`
    when given), then the copy is converted in place and saved.
    """
    for entry in os.listdir(input_folder):
        source_path = os.path.join(input_folder, entry)
        target_name = entry
        if output_extension:
            target_name = os.path.splitext(entry)[0] + '.' + output_extension
        target_path = os.path.join(output_folder, target_name)
        if not os.path.isfile(source_path):
            # Skip subdirectories and other non-file entries.
            continue
        # Copy first, then modify the copy in place.
        shutil.copy(source_path, target_path)
        stage = convert_face_varying_to_vertex_interpolation(target_path)
        stage.Save()
        logging.info(f"Processed file: {source_path} -> {target_path}")
def main():
    """CLI entry point: convert a single USD file or a whole folder of them."""
    parser = argparse.ArgumentParser(description='Convert USD file formats and interpolation of meshes.')
    parser.add_argument('input', type=str, help='Input file or folder path')
    parser.add_argument('output', type=str, help='Output file or folder path')
    parser.add_argument('-f', '--format', type=str, choices=['usd', 'usda'], help='Output file format (usd or usda)')
    args = parser.parse_args()
    input_path = args.input
    output_path = args.output
    output_extension = args.format
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    if os.path.isdir(input_path):
        # Folder mode: convert every file inside the input directory.
        process_folder(input_path, output_path, output_extension)
    else:
        # Single-file mode: optionally retarget the output extension first.
        if output_extension:
            output_path = os.path.splitext(output_path)[0] + '.' + output_extension
        shutil.copy(input_path, output_path)  # Make a copy of the input file and rename it to the output file
        stage = convert_face_varying_to_vertex_interpolation(output_path)
        stage.Save()  # Modify the output file in place
        logging.info(f"Processed file: {input_path} -> {output_path}")
# Script entry point.
if __name__ == '__main__':
    main()
| 3,853 | Python | 37.929293 | 117 | 0.637944 |
Kim2091/RTXRemixTools/RemixMeshConvert/README.md | ## RemixMeshConvert
$\color{#f7d26a}{\textsf{Use this instead. It integrates directly into Omniverse:}}$ https://github.com/Ekozmaster/NvidiaOmniverseRTXRemixTools
<details>
<summary>Old description:</summary>
*Based on a script originally written by E-man*
$\color{#f7d26a}{\textsf{Please back up your USD and USDA files before running!}}$
**How to use this script:**
To convert a single file:
`python RemixMeshConvert.py [input.usda] [output.usda]`
To batch convert a folder:
`python RemixMeshConvert.py path\to\input\folder path\to\output\folder -f [usd or usda]`
**Arguments:**
`-f` `--format` - This controls the output format when using the script in **batch** mode
**Description:**
This script takes USD files as input, makes a copy named as the output, converts the interpolation of all meshes in the given USD file from face-varying to vertex, and finally saves the modified stages to the new USD files. It can process a single file or a folder of files, and also includes a dictionary of aliases for replacing specific primvar names with `float2[] primvars:st1`.
**For your final exports to use in-game, please save as USD! USDA files are very inefficient in comparison**
Please refer to `requirements.txt` for necessary Python libraries.
</details>
| 1,289 | Markdown | 33.864864 | 383 | 0.757952 |
Kim2091/RTXRemixTools/RemixMeshConvert/For USD Composer/RemixMeshConvert_OV.py | from pxr import Usd, UsdGeom, Sdf
# Maps legacy/DCC-specific UV primvar names to the Remix-expected `st`
# primvar name (and its value type), applied when rewriting mesh primvars.
ALIASES = {
    "primvars:UVMap": ("primvars:st", Sdf.ValueTypeNames.Float2Array),
    "primvars:UVChannel_1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    "primvars:map1": ("primvars:st1", Sdf.ValueTypeNames.Float2Array),
    # Add more aliases here
}
def convert_face_varying_to_vertex_interpolation(stage):
    """Convert every mesh on the given stage from faceVarying to vertex interpolation.

    USD Composer variant: operates on an already-open stage instead of a file
    path, and blocks the old primvar attribute rather than removing it.
    Returns the same stage object.
    """
    mesh_prims = [prim for prim in stage.TraverseAll() if prim.IsA(UsdGeom.Mesh)]
    for prim in mesh_prims:
        mesh = UsdGeom.Mesh(prim)
        indices = prim.GetAttribute("faceVertexIndices")
        points = prim.GetAttribute("points")
        if not indices or not points:
            continue  # Skip if the required attributes are missing
        # Expand points: one point per face-vertex, in index order.
        points_arr = points.Get()
        modified_points = [points_arr[i] for i in indices.Get()]
        points.Set(modified_points)
        # After expansion, the index buffer is simply sequential.
        indices.Set([i for i in range(len(indices.Get()))])
        mesh.SetNormalsInterpolation(UsdGeom.Tokens.vertex)
        primvar_api = UsdGeom.PrimvarsAPI(prim)
        for var in primvar_api.GetPrimvars():
            if var.GetInterpolation() == UsdGeom.Tokens.faceVarying:
                var.SetInterpolation(UsdGeom.Tokens.vertex)
            # Replace aliases with "float2[] primvars:st"
            if var.GetName() in ALIASES:
                new_name, new_type_name = ALIASES[var.GetName()]
                new_var = primvar_api.GetPrimvar(new_name)
                if new_var:
                    new_var.Set(var.Get())
                else:
                    new_var = primvar_api.CreatePrimvar(new_name, new_type_name)
                    new_var.Set(var.Get())
                new_var.SetInterpolation(UsdGeom.Tokens.vertex)  # Set interpolation to vertex
                # Remove the old primvar directly from the UsdGeomPrimvar object
                var.GetAttr().Block()
    return stage
# FIX: `omni` was referenced without being imported anywhere in this file
# (only pxr is imported at the top); import it here so the snippet runs in
# the Script Editor regardless of what the session has already loaded.
import omni.usd

# Grab the stage currently open in USD Composer and convert it in place.
stage = omni.usd.get_context().get_stage()
convert_face_varying_to_vertex_interpolation(stage)
| 1,995 | Python | 38.137254 | 97 | 0.614035 |
Kim2091/RTXRemixTools/RemixMeshConvert/For USD Composer/README.md | ## RemixMeshConvert
*Based on a script originally written by E-man*
$\color{#f7d26a}{\textsf{Please back up your USD and USDA files before running!}}$
**How to use this script:**
* Install USD Composer: https://www.nvidia.com/en-us/omniverse/apps/create/
* Once launched, open the Script Editor in Window > Script Editor
* Load your mesh files by dragging it into the pane on the right
* Run the script
For more information, look at [this thread](https://discord.com/channels/1028444667789967381/1096847508002590760/1123306156773879928) in the [RTX Remix Showcase server](https://discord.gg/rtxremix)
**Description:**
The RemixMeshConvert_OV script is only for usage within Omniverse's USD Composer. If you want to process files and folders independently of Omniverse, use RemixMeshConvert in the directory above this one.
**For your final exports to use in-game, please save as USD! USDA files are very inefficient in comparison**
| 941 | Markdown | 46.099998 | 204 | 0.776833 |
gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/objects_position_normal_90.py | import omni.replicator.core as rep
with rep.new_layer():
    # Asset locations: table plus the individual cutlery USDs.
    local_path = "/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/"
    TABLE_USD = f"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd"
    SPOON_SMALL_USD = f"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd"
    SPOON_BIG_USD = f"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd"
    FORK_SMALL_USD = f"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd"
    FORK_BIG_USD = f"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd"
    KNIFE_USD = f"{local_path}/asset/Collected_Knife/Knife.usd"

    # Camera parameters for the two cameras (1024px and 512px render products).
    cam_position = (46, 200, 25)
    cam_position2 = (46, 120, 25)
    cam_position_random = rep.distribution.uniform((0, 181, 0), (0, 300, 0))
    cam_rotation = (-90, 0, 0)
    focus_distance = 114
    focus_distance2 = 39.1
    focal_length = 27
    focal_length2 = 18.5
    f_stop = 1.8
    f_stop2 = 1.8
    focus_distance_random = rep.distribution.normal(500.0, 100)

    # Cutlery selection: swap in e.g. KNIFE_USD to generate data for another item.
    current_cutlery = SPOON_SMALL_USD
    # Output folder is named after the selected asset file (e.g. "Spoon_Small").
    output_path = current_cutlery.split(".")[0].split("/")[-1]

    def rect_lights(num=1):
        """Create `num` rectangular panel lights with randomized temperature,
        intensity and scale, at a fixed pose above the table."""
        lights = rep.create.light(
            light_type="rect",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 5000),
            position=(45, 110, 0),
            rotation=(-90, 0, 0),
            scale=rep.distribution.uniform(50, 100),
            count=num
        )
        return lights.node

    def dome_lights(num=3):
        """Create `num` dome lights with randomized temperature and intensity
        to light the entire scene."""
        lights = rep.create.light(
            light_type="dome",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 1000),
            position=(45, 120, 18),
            rotation=(225, 0, 0),
            count=num
        )
        return lights.node

    def table():
        """Place the static table prim at a fixed pose."""
        table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])
        with table:
            rep.modify.pose(
                position=(46, -0.0, 20),
                rotation=(0, -90, -90),
            )
        return table

    def cutlery_props(size=15):
        """Randomizer for CUTLERY assets: scatter `size` point instances with
        random placement and rotation on the table surface (fixed Y = tabletop)."""
        instances = rep.randomizer.instantiate(rep.utils.get_usd_files(
            current_cutlery), size=size, mode='point_instance')
        with instances:
            rep.modify.pose(
                position=rep.distribution.uniform(
                    (0, 76.3651, 0), (90, 76.3651, 42)),
                rotation=rep.distribution.uniform(
                    (-90, -180, 0), (-90, 180, 0)),
            )
        return instances.node

    # Register the randomizations so they can be triggered per frame.
    rep.randomizer.register(table)
    rep.randomizer.register(cutlery_props)
    rep.randomizer.register(rect_lights)
    rep.randomizer.register(dome_lights)

    # Set up both cameras and attach them to render products.
    camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length,
                               position=cam_position, rotation=cam_rotation, f_stop=f_stop)
    # BUGFIX: camera2 previously reused f_stop; it now uses its own f_stop2 value
    # (f_stop2 was defined but never used).
    camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2,
                                position=cam_position2, rotation=cam_rotation, f_stop=f_stop2)

    # Will render 1024x1024 images and 512x512 images.
    render_product = rep.create.render_product(camera, (1024, 1024))
    render_product2 = rep.create.render_product(camera2, (512, 512))

    # Initialize and attach a writer that saves RGB frames to disk.
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir=f"{local_path}/data/normal/{output_path}",
                      rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)
    writer.attach([render_product, render_product2])

    # Randomize the scene on each of 50 frames.
    with rep.trigger.on_frame(num_frames=50):
        rep.randomizer.table()
        rep.randomizer.rect_lights(1)
        rep.randomizer.dome_lights(1)
        rep.randomizer.cutlery_props(5)

    # Run the simulation graph
    rep.orchestrator.run()
| 4,170 | Python | 37.62037 | 133 | 0.621823 |
gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/README.md | ---
description: Come learn how to generate photorealistic images in Nvidia Replicator and build object detection model using Edge Impulse.
---
# The Unreasonable Effectiveness of Synthetic Data
Created By:
[George Igwegbe](https://www.linkedin.com/in/george-igwegbe/)
Public Project Link:
[GitHub](https://github.com/gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse) | [Edge Impulse](https://studio.edgeimpulse.com/public/187851/latest)

## Introduction
Building an object detection model can be tricky since it requires a large dataset. Sometimes, data can be few or not diverse enough to train a robust model. Synthetic data offers an alternative to generating well-represented datasets to build a quality model. By applying domain randomization, we developed photorealistic datasets, trained a neural network, and validated the model using real datasets. To create a diverse dataset, we created a variety of simulated environments with randomized properties: changing lighting conditions, camera position, and material textures. We also show that synthetic, randomized datasets can help generalize a model to adapt to the real-world environment.
## Story
We wanted to replicate the [object detection](https://www.youtube.com/watch?v=Vwv0PJPeC4s) work by Louis Moreau, but this time using synthetic data rather than real data. The project aims to demonstrate how to build and deploy the Edge Impulse object detection model using synthetic datasets generated by Nvidia Omniverse Replicator. The Replicator is an Nvidia Omniverse extension that provides means of generating physically accurate synthetic data.
## Why Synthetic Data?
Computer vision tasks such as classification, object detection, and segmentation require a large-scale dataset. Data collected from some real-world applications tend to be narrow and less diverse, often collected from a single environment, and sometimes is unchanged and stays the same for the most time. In addition, data collected from a single field tend to have fewer examples of tail-end scenarios and rare events, and we cannot easily replicate these situations in the real world.
Andrej Karpathy's presentation - (source: Tesla AI Day, 2021) |
--- |
 |
Consequently, models trained in a single domain are brittle and often fail when deployed in another environment; thus, it requires another training cycle to adapt to the new environment. It raises the question, how can we efficiently and cheaply collect generalized data across several domains? A simple unreasonable effective solution is Domain Randomization, which varies the texture and colour of the foreground object, the background image, the number of lights in the scene, the pose of the lights, and the camera position etc. Domain randomization can further improve the variability in the texture of synthetic data of rare events generated in the simulator.
> The purpose of domain randomization is to provide enough simulated variability at training time such that at test time the model is able to generalize to real-world data.” - Tobin et al, Domain Randomization for Transferring Deep Neural Networks from Simulation to the Real World, 2017
Domain Randomization for Transferring Deep Neural Networks - source: Tobin et al, 2017) |
--- |
 |
Nvidia Replicator enables us to perform Domain Randomization. The Replicator is one module within the Omniverse family, and it offers tools and workflow to generate data for various computer vision and non-visual tasks. The Replicator is a highly interoperable tool that integrates with over 40+ modelling/rendering applications across different verticals. The seamless integration is possible thanks to Pixar's Universal Scene Description(USD), which serves as a protocol for various applications such as Blender, 3DMax, Maya, Revit, C4D etc., to work with the Nvidia Replicator.
## Data-Centric Workflow
Traditional machine learning workflow is often model-centric, focusing more on the model's development by iteratively improving the algorithm design, etc. In this project, we chose the Data-centric approach, where we fixed the model and iteratively improved the quality of the generated dataset. This approach is more robust since we know our model is as good as the dataset. This method hence systematically changes the dataset performance on an AI task. At its core, it is thinking about ML in terms of data, not the model.
Data generation and model building workflow |
--- |
 |
## Requirements
- Nvidia Omniverse Replicator
- Edge Impulse Studio
- Logitech Webcam HD Pro - C920
### Hardware and Driver Setup
Nvidia Omniverse Replicator is a computation-intensive application requiring a moderate-size GPU and decent RAM. My hardware setup consists of 32GB RAM, 1TB storage space and 8GB GPU with an Intel i9 processor.
Hardware Specification | Hardware Specification
--- | ---
 | 
The application can run on both Windows and Linux operating systems. For this experiment, we used Ubuntu 20.04 LTS distro, given Ubuntu 18.04 is no longer supported by Nvidia Omniverse as of November 2022. In addition, we selected the appropriate Nvidia driver, v510.108.03 and installed it on a Linux machine.
Software Specification | Software Specification
--- | ---
 | 
## Experiment Setup and Data Generation
The environment for the experiment consists of movable and immovable objects (dynamic and static positioning objects). The immovable object consists of Lights, a Table and two Cameras. At the same time, the movable objects are the cutlery which is a spoon, fork and knife. We will use domain randomization to alter the properties of some of the movable and immovable objects. Assets which include objects and scenes are represented in the Replicator as USD.
Experimental Setup |
--- |
 |
Every object in Omniverse Replicator is represented as USD. A 3D model file with varying extensions such as obj, fbx, and glif can be imported into the Replicator using Nvidia Omniverse's CAD Importer extension. The extension converts the 3D files into USD. We imported our assets (Table, knife, spoon and fork) into the simulator by specifying the path of the assets.
Rectangular Light | Dome Light
--- | --- |
 | 
Lighting plays a crucial role in data generation. There are different built-in lighting types in the Nvidia replicator. We chose two rectangular lights and a dome light since they give us better lighting options and capabilities for generating photorealistic images. The rectangular light emulates light generated from a panel, and the dome light lets you dynamically light the entire scene. We randomized some light parameters such as temperature and intensity, and both parameters were sampled from a <strong>normal distribution</strong>. In addition, the scale parameter was sampled from a <strong>uniform distribution</strong> while keeping the rotation and position of the lights fixed.
```python
# Lighting setup for rectangular light and dome light
def rect_lights(num=2):
lights = rep.create.light(
light_type="rect",
temperature=rep.distribution.normal(6500, 500),
intensity=rep.distribution.normal(0, 5000),
position=(-131,150,-134),
rotation=(-90,0,0),
scale=rep.distribution.uniform(50, 100),
count=num
)
return lights.node
def dome_lights(num=1):
lights = rep.create.light(
light_type="dome",
temperature=rep.distribution.normal(6500, 500),
intensity=rep.distribution.normal(0, 1000),
position=(0,0,0),
rotation=(270,0,0),
count=num
)
return lights.node
```
We fixed the position and rotation, selected the tabletop materials, chose an additional <strong>Mahogany</strong> material, and alternated the material in the data generation process.
```python
# Import and position the table object
def table():
table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])
with table:
rep.modify.pose(
position=(-135.39745, 0, -140.25696),
rotation=(0,-90,-90),
)
return table
```
To improve our dataset's quality further, we chose two cameras of different resolutions, which we strategically positioned in various locations within the scene. In addition, we varied the position of the cameras in a different version of the data generation process.
```python
# Multiple setup cameras and attach it to render products
camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)
camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop)
# Will render 1024x1024 images and 512x512 images
render_product = rep.create.render_product(camera, (1024, 1024))
render_product2 = rep.create.render_product(camera2, (512, 512))
```
Finally, for the movable objects, which include a knife, spoon and fork, we ensure that these objects can only translate within the bound of the table. So we chose a bounding position where the objects were expected to translate and rotate with the table. We sampled position and rotation from a uniform distribution while maintaining the number of movable objects generated at each iteration to be five.
```python
# Define randomizer function for CULTERY assets.
def cutlery_props(size=5):
instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cultery), size=size, mode='point_instance')
with instances:
rep.modify.pose(
position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),
rotation=rep.distribution.uniform((-90,-180, 0), (-90, 180, 0)),
)
return instances.node
```
At this juncture, we have instantiated all objects in our scene. We can now run the randomizer to generate 50 images at each synthetic generation cycle.
```python
# Register randomization
with rep.trigger.on_frame(num_frames=50):
rep.randomizer.table()
rep.randomizer.rect_lights(1)
rep.randomizer.dome_lights(1)
rep.randomizer.cutlery_props(5)
# Run the simulation graph
rep.orchestrator.run()
```
To ensure we generated photorealistic images, we switched to <strong>RTXinteractive(Path Tracing)</strong> mode, which gave high-fidelity renderings.
Data generation process |
--- |
 |
## Data Distribution and Model Building
Data Distribution of different items |
--- |
 |
Following the data-centric philosophy, We generated three versions of the dataset. The first version, <strong>V1</strong>, consists of generated images normal to the camera position, and <strong>V2</strong> represents images generated at an angle of 60 degrees to the camera position with a mahogany table top. <strong>V3</strong> comprises images normal to the camera position while the cutlery were suspended in space.
V1 - Normal to the object |
--- |
 |
<table>
<tr>
<td>V2 - Angled to the object</td>
<td>V3 - Normal to the object and object suspended in space</td>
</tr>
<tr>
<td valign="top"><img src="media_assets/v2.avif"></td>
<td valign="top"><img src="media_assets/v3.avif"></td>
</tr>
</table>
<table>
<tr>
<td>Generated Dataset - V2</td>
<td>Generated Dataset - V3</td>
</tr>
<tr>
<td valign="top"><img src="media_assets/generated_dataset.avif"></td>
<td valign="top"><img src="media_assets/generated_dataset2.avif"></td>
</tr>
</table>
## Edge Impulse: Data Annotation and Model Building
<table>
<tr>
<td>Data Labeler </td>
<td>Data Annotation</td>
</tr>
<tr>
<td><img src="media_assets/annotating_image.png"></td>
<td><img src="media_assets/image_in_queue.png"></td>
</tr>
</table>
We uploaded the generated images to Edge Impulse Studio, where we annotated the dataset into different classes. We carefully annotated each dataset version and trained using the <strong>Yolov5</strong> object detection model. We tried a couple of input sizes ranging from 320, 512 and 1024 pixels before settling with <strong>320</strong>. Edge Impulse provided an excellent version control system for models, which enabled us to track model performance across different dataset versions and hyperparameters.
<table>
<tr>
<td>Create Impulse</td>
<td>Generate Feature </td>
</tr>
<tr>
<td><img src="media_assets/building_model.png"></td>
<td><img src="media_assets/feature_extraction.png"></td>
</tr>
</table>
Version Control in Edge Impulse |
--- |
 |
### Testing of Object Detection Models with Real Objects
We used the Edge Impulse CLI tool to evaluate the model's accuracy by downloading, building and running the model locally. A Logitech C920 webcam streamed the live video of objects on a table from 50 cm to 80 cm from the camera. The position of the camera remains fixed during the experiment. The clips below show that the trained model does not generalize well to real-world objects. Thus we needed to improve the model by uploading, annotating and training the model with the V2 dataset.
V1 failure - model failed to identify objects |
--- |
 |
We observed improved model performance when trained with the V2 dataset. The model could identify various objects distinctly, although the model failed when we changed the objects' orientations. Thus, we trained the model with the remaining V3 dataset to mitigate these issues and increase other hyperparameters, such as epochs from 500 to 2000. We also tested the performance of our object detector on real objects with different background textures, and the model performed well in these conditions.
V2 success - model can identify objects |
--- |
 |
V2 failure - model failed to identify objects in different orientations |
--- |
 |
After several cycles of iterating over various hyperparameters, we got a model that generalizes well across different orientations.
V3 success - model can identify objects in different orientations |
--- |
 |
V3 success - model can identify different materials |
--- |
 |
The core idea behind the data-centric approach to solving ML problems is to create more data around the failure points of the model. We improved the model by iteratively improving the data generation, especially in areas where the model had previously failed.

## Conclusion
In this work, we learned how the domain randomization approach helps generate quality and well-generalized datasets for the object detection task. We also demonstrated the effectiveness of data-centric machine learning workflow in improving the model performance. Although this work is restricted to visual problems, we can extend domain randomization to other sensors such as lidar, accelerometer, and ultrasonic sensors.
## Reference
- [Project on Edge Impulse](https://studio.edgeimpulse.com/public/187851/latest)
- [Introduction to Replicator](https://docs.omniverse.nvidia.com/app_code/prod_extensions/ext_replicator.html)
- [Introduction to USD](https://developer.nvidia.com/usd#usdnvidia)
- [Tesla AI Day](https://youtu.be/j0z4FweCy4M?t=5727)
- [Domain Randomization for Transferring Deep Neural Networks](https://arxiv.org/pdf/1703.06907.pdf)
- [Understanding Domain Randomization for SIM-TO-REAL Transfer](https://arxiv.org/pdf/2110.03239.pdf)
| 16,016 | Markdown | 55.597173 | 695 | 0.763986 |
gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/old_setting/README_old.md |
### Synthetic data with Nvidia replicator and Edge Impulse

- Fixed position
- Fixed Camera but not random
- Fixed lighting and light parameters
- Changed background materials | 228 | Markdown | 24.444442 | 60 | 0.767544 |
gigwegbe/synthetic_data_with_nvidia_replicator_and_edge_impulse/old_setting/objects_position_normal_90.py | import omni.replicator.core as rep
with rep.new_layer():
    # Asset locations: table plus the individual cutlery USDs.
    local_path = "/home/george/Documents/synthetic_data_with_nvidia_replicator_and_edge_impulse/"
    TABLE_USD = f"{local_path}/asset/Collected_EastRural_Table/EastRural_Table.usd"
    SPOON_SMALL_USD = f"{local_path}/asset/Collected_Spoon_Small/Spoon_Small.usd"
    SPOON_BIG_USD = f"{local_path}/asset/Collected_Spoon_Big/Spoon_Big.usd"
    FORK_SMALL_USD = f"{local_path}/asset/Collected_Fork_Small/Fork_Small.usd"
    FORK_BIG_USD = f"{local_path}/asset/Collected_Fork_Big/Fork_Big.usd"
    KNIFE_USD = f"{local_path}/asset/Collected_Knife/Knife.usd"

    # Camera parameters for the two cameras (1024px and 512px render products).
    cam_position = (-131, 200, -134)
    cam_position2 = (-131, 120, -134)
    cam_position_random = rep.distribution.uniform((0, 181, 0), (0, 300, 0))
    cam_rotation = (-90, 0, 0)  # alternative angled view: (-45, 0, 0)
    focus_distance = 120
    focus_distance2 = 72
    focal_length = 19.1
    focal_length2 = 7.5
    f_stop = 1.8
    f_stop2 = 1.8
    focus_distance_random = rep.distribution.normal(500.0, 100)

    # Cutlery selection: swap in e.g. KNIFE_USD to generate data for another item.
    current_cutlery = SPOON_SMALL_USD
    # Output folder is named after the selected asset file (e.g. "Spoon_Small").
    output_path = current_cutlery.split(".")[0].split("/")[-1]

    def rect_lights(num=2):
        """Create `num` rectangular panel lights with randomized temperature,
        intensity and scale, at a fixed pose above the table."""
        lights = rep.create.light(
            light_type="rect",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 5000),
            position=(-131, 150, -134),
            rotation=(-90, 0, 0),
            scale=rep.distribution.uniform(50, 100),
            count=num
        )
        return lights.node

    def dome_lights(num=1):
        """Create `num` dome lights with randomized temperature and intensity
        to light the entire scene."""
        lights = rep.create.light(
            light_type="dome",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(0, 1000),
            position=(0, 0, 0),
            rotation=(270, 0, 0),
            count=num
        )
        return lights.node

    def table():
        """Place the static table prim at a fixed pose."""
        table = rep.create.from_usd(TABLE_USD, semantics=[('class', 'table')])
        with table:
            rep.modify.pose(
                position=(-135.39745, 0, -140.25696),
                rotation=(0, -90, -90),
            )
        return table

    def cutlery_props(size=15):
        """Randomizer for CUTLERY assets: scatter `size` point instances with
        random placement and rotation on the table surface (fixed Y = tabletop)."""
        instances = rep.randomizer.instantiate(rep.utils.get_usd_files(current_cutlery), size=size, mode='point_instance')
        with instances:
            rep.modify.pose(
                position=rep.distribution.uniform((-212, 76.2, -187), (-62, 76.2, -94)),
                rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
            )
        return instances.node

    # Register the randomizations so they can be triggered per frame.
    rep.randomizer.register(table)
    rep.randomizer.register(cutlery_props)
    rep.randomizer.register(rect_lights)
    rep.randomizer.register(dome_lights)

    # Set up both cameras and attach them to render products.
    camera = rep.create.camera(focus_distance=focus_distance, focal_length=focal_length, position=cam_position, rotation=cam_rotation, f_stop=f_stop)
    # BUGFIX: camera2 previously reused f_stop; it now uses its own f_stop2 value
    # (f_stop2 was defined but never used).
    camera2 = rep.create.camera(focus_distance=focus_distance2, focal_length=focal_length2, position=cam_position2, rotation=cam_rotation, f_stop=f_stop2)

    # Will render 1024x1024 images and 512x512 images.
    render_product = rep.create.render_product(camera, (1024, 1024))
    render_product2 = rep.create.render_product(camera2, (512, 512))

    # Initialize and attach a writer that saves RGB frames to disk.
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir=f"{local_path}/data/normal/{output_path}", rgb=True, bounding_box_2d_tight=False, semantic_segmentation=False)
    writer.attach([render_product, render_product2])

    # Randomize the scene on each of 50 frames.
    with rep.trigger.on_frame(num_frames=50):
        rep.randomizer.table()
        rep.randomizer.rect_lights(1)
        rep.randomizer.dome_lights(1)
        rep.randomizer.cutlery_props(15)

    # Run the simulation graph
    rep.orchestrator.run()
| 4,065 | Python | 38.096153 | 153 | 0.64182 |
mati-nvidia/window-menu-add/exts/maticodes.example.window.add/maticodes/example/window/add/extension.py | import carb
import omni.ext
import omni.kit.ui
from .window import MyCustomWindow, WINDOW_TITLE
class WindowMenuAddExtension(omni.ext.IExt):
    """Extension that registers a custom window under the editor's Window menu."""

    def on_startup(self, ext_id):
        carb.log_info("[maticodes.example.window.add] WindowMenuAddExtension startup")
        # Note the "Window" part of the path that directs the new menu item to the "Window" menu.
        self._menu_path = f"Window/{WINDOW_TITLE}"
        self._window = None
        editor_menu = omni.kit.ui.get_editor_menu()
        self._menu = editor_menu.add_item(self._menu_path, self._on_menu_click, True)

    def on_shutdown(self):
        carb.log_info("[maticodes.example.window.add] WindowMenuAddExtension shutdown")
        # Remove the menu entry first, then tear down the window if one was created.
        omni.kit.ui.get_editor_menu().remove_item(self._menu)
        if self._window is not None:
            self._window.destroy()
            self._window = None

    def _on_menu_click(self, menu, toggled):
        """Handles showing and hiding the window from the 'Windows' menu."""
        if not toggled:
            # Unchecked: hide the window if it exists.
            if self._window is not None:
                self._window.hide()
            return
        # Checked: create the window lazily on first use, otherwise re-show it.
        if self._window is None:
            self._window = MyCustomWindow(WINDOW_TITLE, self._menu_path)
        else:
            self._window.show()
| 1,232 | Python | 34.22857 | 103 | 0.621753 |
mati-nvidia/window-menu-add/exts/maticodes.example.window.add/maticodes/example/window/add/window.py | import omni.kit.ui
import omni.ui as ui
WINDOW_TITLE = "My Custom Window"
class MyCustomWindow(ui.Window):
    """Example window whose visibility is kept in sync with an editor menu item.

    When the window is shown or hidden (e.g. via its close button), the checked
    state of the menu item at `menu_path` is updated to match.
    """

    def __init__(self, title, menu_path):
        super().__init__(title, width=640, height=480)
        # Menu path whose checkmark mirrors this window's visibility.
        self._menu_path = menu_path
        self.set_visibility_changed_fn(self._on_visibility_changed)
        self._build_ui()

    def on_shutdown(self):
        # NOTE(review): `_win` is never assigned anywhere else in this class;
        # this looks like leftover boilerplate — confirm whether it can be removed.
        self._win = None

    def show(self):
        self.visible = True
        self.focus()

    def hide(self):
        self.visible = False

    def _build_ui(self):
        # Minimal placeholder content.
        with self.frame:
            with ui.VStack():
                ui.Label("This is just an empty window", width=0, alignment=ui.Alignment.CENTER)

    def _on_visibility_changed(self, visible):
        # Keep the menu item's checked state in sync with the actual visibility.
        omni.kit.ui.get_editor_menu().set_value(self._menu_path, visible)
| 809 | Python | 25.129031 | 96 | 0.606922 |
mati-nvidia/window-menu-add/exts/maticodes.example.window.add/docs/README.md | # Window Menu Add
An example extension showing how to create a window and add it to the `Window` menu so that it can be shown and hidden
using the menu item in the `Window` menu. | 179 | Markdown | 43.999989 | 118 | 0.765363 |
mati-nvidia/developer-office-hours/RUNBOOK.md | 1. Run make_ext.bat <YYYY-MM-DD>
1. Put script into the `scripts/` folder.
1. Add questions covered in the ext README. | 118 | Markdown | 38.666654 | 43 | 0.728814 |
mati-nvidia/developer-office-hours/tools/scripts/csv2md.py | # SPDX-License-Identifier: Apache-2.0
import argparse
import csv
from pathlib import Path
def csv_to_md(csv_path: Path) -> Path:
    """Convert a DOH question CSV into a Markdown list of question links.

    Each CSV row is expected to have at least six columns, where column 1 is
    the question text, column 2 is the link URL, and column 5 is a skip flag.
    Rows with a URL and an empty skip flag are written as Markdown list items.

    Args:
        csv_path: Path to the input CSV file.

    Returns:
        Path of the Markdown file written next to the CSV (same name, .md).
    """
    md_path = csv_path.with_suffix(".md")
    # newline="" is required for the csv module; explicit encoding keeps the
    # result independent of the platform's locale default.
    with open(csv_path, newline="", encoding="utf-8") as f, \
            open(md_path, "w", encoding="utf-8") as out:
        for row in csv.reader(f):
            if row[2] and not row[5]:
                out.write(f"1. [{row[1]}]({row[2]})\n")
    return md_path


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert DOH CSV to MD")
    parser.add_argument(
        "csvfile", help="The CSV file to convert"
    )
    args = parser.parse_args()
    csv_to_md(Path(args.csvfile))
    print("Success!")
| 654 | Python | 20.833333 | 73 | 0.54893 |
mati-nvidia/developer-office-hours/tools/scripts/make_ext.py | # SPDX-License-Identifier: Apache-2.0
import argparse
import shutil
import os
from pathlib import Path
SOURCE_PATH = Path(__file__).parent / "template" / "maticodes.doh_YYYY_MM_DD"
def text_replace(filepath, tokens_map):
    """Replace every occurrence of each token in *tokens_map* in the file at *filepath*.

    The file is read fully, rewritten in place, and saved with UTF-8 encoding
    so results don't depend on the platform's locale default (important for
    templates shared across Windows/Linux).

    Args:
        filepath: Path to a text file that is rewritten in place.
        tokens_map: Mapping of literal token -> replacement string.
    """
    with open(filepath, "r", encoding="utf-8") as f:
        data = f.read()
    for token, replacement in tokens_map.items():
        data = data.replace(token, replacement)
    with open(filepath, "w", encoding="utf-8") as f:
        f.write(data)
if __name__ == "__main__":
    # BUGFIX: the previous description was copy-pasted from a different script
    # ("Create folder link to Kit App installed from Omniverse Launcher").
    parser = argparse.ArgumentParser(
        description="Create a new Developer Office Hours extension from the template"
    )
    parser.add_argument(
        "date", help="The date of the Office Hour in YYYY-MM-DD format."
    )
    args = parser.parse_args()
    year, month, day = args.date.split("-")

    # Copy the template extension tree to a new dated extension folder.
    dest_path = Path(__file__).parent / "../.." / f"exts/maticodes.doh_{year}_{month}_{day}"
    shutil.copytree(SOURCE_PATH, dest_path)

    # Rename the templated package folder to match the date.
    template_ext_folder = dest_path / "maticodes" / "doh_YYYY_MM_DD"
    ext_folder = dest_path / "maticodes" / f"doh_{year}_{month}_{day}"
    os.rename(template_ext_folder, ext_folder)

    # Tokens replaced inside the copied template files.
    tokens_map = {
        "[DATE_HYPHEN]": f"{year}-{month}-{day}",
        "[DATE_UNDERSCORE]": f"{year}_{month}_{day}",
        "[DATE_PRETTY]": f"{month}/{day}/{year}",
    }

    # text replace extension.toml
    ext_toml = dest_path / "config" / "extension.toml"
    text_replace(ext_toml, tokens_map)
    # text replace README
    readme = dest_path / "docs" / "README.md"
    text_replace(readme, tokens_map)
    # text replace extension.py
    ext_py = ext_folder / "extension.py"
    text_replace(ext_py, tokens_map)
    print("Success!")
| 1,695 | Python | 28.241379 | 115 | 0.614159 |
mati-nvidia/developer-office-hours/tools/scripts/template/maticodes.doh_YYYY_MM_DD/maticodes/doh_YYYY_MM_DD/extension.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.ext
import omni.ui as ui
class MyWindow(ui.Window):
    """Example window whose contents are built lazily via a frame build callback."""

    def __init__(self, title: str = None, **kwargs):
        super().__init__(title, **kwargs)
        # Defer UI construction until the frame is actually drawn.
        self.frame.set_build_fn(self._build_window)

    def _build_window(self):
        def clicked():
            carb.log_info("Button Clicked!")

        # A scrolling frame holding a label and a button stacked vertically.
        with ui.ScrollingFrame():
            with ui.VStack(height=0):
                ui.Label("My Label")
                ui.Button("Click Me", clicked_fn=clicked)
class MyExtension(omni.ext.IExt):
    """Extension lifecycle: creates the example window on startup, destroys it on shutdown."""

    def on_startup(self, ext_id):
        carb.log_info("[maticodes.doh_[DATE_UNDERSCORE]] Dev Office Hours Extension ([DATE_HYPHEN]) startup")
        self._window = MyWindow("MyWindow", width=300, height=300)

    def on_shutdown(self):
        carb.log_info("[maticodes.doh_[DATE_UNDERSCORE]] Dev Office Hours Extension ([DATE_HYPHEN]) shutdown")
        # Destroy the window so its UI resources are released with the extension.
        if self._window:
            self._window.destroy()
            self._window = None
| 1,025 | Python | 29.17647 | 110 | 0.599024 |
mati-nvidia/developer-office-hours/tools/scripts/template/maticodes.doh_YYYY_MM_DD/scripts/my_script.py | # SPDX-License-Identifier: Apache-2.0 | 37 | Python | 36.999963 | 37 | 0.783784 |
mati-nvidia/developer-office-hours/tools/scripts/template/maticodes.doh_YYYY_MM_DD/docs/README.md | # Developer Office Hour - [DATE_PRETTY]
This is the sample code from the Developer Office Hour held on [DATE_PRETTY], Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I do something?
...
| 282 | Markdown | 34.374996 | 117 | 0.762411 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_06/scripts/query_script_components.py | # SPDX-License-Identifier: Apache-2.0
from pxr import Sdf
import omni.usd
# Print any Python Scripting Component scripts attached to /World/Cube.
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("omni:scripting:scripts")
if attr.IsValid():
    # attr.Get() may return None when the attribute has no value yet.
    for script in attr.Get() or []:
        print(script)
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_06/scripts/add_script_component.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
import omni.usd
# Apply the Python Scripting API schema to the prim so it can hold scripts,
# then refresh the Property window so the new scripting section shows up.
omni.kit.commands.execute('ApplyScriptingAPICommand',
	paths=[Sdf.Path('/World/Cube')])
omni.kit.commands.execute('RefreshScriptingPropertyWindowCommand')
# Approach 1: set the scripts attribute directly through USD.
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("omni:scripting:scripts")
scripts = attr.Get()
attr.Set([r"C:\Users\mcodesal\Downloads\new_script2.py"])
# Omniverse (Nucleus) URLs work too:
# attr.Set(["omniverse://localhost/Users/mcodesal/new_script2.py"])
# Approach 2: use the undoable ChangeProperty command instead of raw attr.Set().
omni.kit.commands.execute('ChangeProperty',
	prop_path=Sdf.Path('/World/Cube.omni:scripting:scripts'),
	value=Sdf.AssetPathArray(1, (Sdf.AssetPath('C:\\mcodesal\\Downloads\\new_script2.py'))),
	prev=None)
# Approach 3: append to the existing script list rather than replacing it.
attr = prim.GetAttribute("omni:scripting:scripts")
scripts = list(attr.Get())
scripts.append(r"C:\mcodesal\Downloads\new_script.py")
attr.Set(scripts)
| 899 | Python | 31.142856 | 89 | 0.757508 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_06/scripts/interact_script_components.py | # SPDX-License-Identifier: Apache-2.0
import carb.events
import omni.kit.app
# Custom event type shared between this code and a Python Scripting Component.
MY_CUSTOM_EVENT = carb.events.type_from_string("omni.my.extension.MY_CUSTOM_EVENT")
# Push the event onto the app-wide message bus with an arbitrary payload dict.
bus = omni.kit.app.get_app().get_message_bus_event_stream()
bus.push(MY_CUSTOM_EVENT, payload={"prim_path": "/World/Cube", "x": 1})
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_06/docs/README.md | # Developer Office Hour - 01/06/2023
This is the sample code from the Developer Office Hour held on 01/06/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 3:47 - How do you add an extension via the Extension Manager?
- 8:48 - How do you programmatically add a Python Script to a prim? (Python Scripting Component)
- 23:45 - How do you check what Python scripts a prim has? (Python Scripting Component)
- 27:00 - How can you have your extension interact with a Python Scripting Component?
| 583 | Markdown | 52.090904 | 114 | 0.768439 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_21/scripts/pass_arg_to_callback.py | # SPDX-License-Identifier: Apache-2.0
from functools import partial
import omni.ui as ui
def do_something(p1, p2):
    """Example click handler that receives extra arguments bound via functools.partial."""
    print("Hello", p1, p2)
# Build a window with a single button; partial() pre-binds the two extra
# arguments because clicked_fn invokes its callback with no arguments.
window = ui.Window("My Window", width=300, height=300)
with window.frame:
    ui.Button("Click Me", clicked_fn=partial(do_something, "a", "b"))
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_21/scripts/create_mdl_mtl.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
# Create an OmniPBR material from the MDL library and bind it to the current
# selection; mtl_created_list=None means we don't collect the created paths.
omni.kit.commands.execute('CreateAndBindMdlMaterialFromLibrary',
	mdl_name='OmniPBR.mdl',
	mtl_name='OmniPBR',
	mtl_created_list=None)
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_21/scripts/lock_prim.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
# Lock the /Desk prim spec so it can't be edited ("Lock Selected").
# hierarchy=False locks only this prim, not its descendants.
omni.kit.commands.execute('LockSpecs',
	spec_paths=['/Desk'],
	hierarchy=False)
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_21/docs/README.md | # Developer Office Hour - 04/21/2023
This is the sample code from the Developer Office Hour held on 04/21/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 2:02 - How do I change the label of an Action Graph UI button?
- 17:29 - How do I style and Action Graph UI Button?
- 30:36 - How do I pass extra parameters to a button's clicked_fn callback?
- 40:00 - How do I programmatically Lock Selected for a prim?
- 44:08 - How do I create an MDL material on a stage?
| 558 | Markdown | 49.818177 | 114 | 0.74552 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_14/scripts/toggle_fullscreen.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("omni.kit.ui.editor_menu_bridge", "action_editor_menu_bridge_window_fullscreen_mode")
action.execute() | 269 | Python | 37.571423 | 121 | 0.784387 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_14/scripts/move_prim_forward.py | # SPDX-License-Identifier: Apache-2.0
from pxr import Gf, UsdGeom, Usd
import omni.usd

# Move the camera at /World/Camera 100 units along its own forward axis.
# In USD, a camera's default view direction is -Z in its local space.
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(prim)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
# Apply the local matrix to the start and end points of the camera's default forward vector (-Z)
a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
b: Gf.Vec4d = Gf.Vec4d(0,0,-1,1) * local_transformation
# Get the vector between those two points to get the camera's current forward vector
cam_fwd_vec = b-a
# Convert to Vec3 and then normalize to get unit vector
cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
# Multiply the forward direction vector with how far forward you want to move
forward_step = cam_fwd_unit_vec * 100
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
# NOTE(review): assumes the prim already has an xformOp:translate op — TODO confirm
prim.GetAttribute("xformOp:translate").Set(translate)
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_14/scripts/execute_action.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("ext_id", "action_id")
action.execute() | 206 | Python | 28.571424 | 61 | 0.762136 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_14/docs/README.md | # Developer Office Hour - 04/14/2023
This is the sample code from the Developer Office Hour held on 04/14/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I find and execute a Kit Action?
- How do I programmatically toggle fullscreen mode?
- How do I move a prim in the forward direction? | 388 | Markdown | 47.624994 | 114 | 0.775773 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_13/scripts/add_script_component.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
import omni.usd
# Create the Python Scripting Component property
omni.kit.commands.execute('ApplyScriptingAPICommand',
paths=[Sdf.Path('/World/Cube')])
omni.kit.commands.execute('RefreshScriptingPropertyWindowCommand')
# Add your script to the property
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
attr = prim.GetAttribute("omni:scripting:scripts")
scripts = attr.Get()
# Property with no script paths returns None
if scripts is None:
scripts = []
else:
# Property with scripts paths returns VtArray.
# Convert to list to make it easier to work with.
scripts = list(scripts)
scripts.append(r"C:\Users\mcodesal\Downloads\new_script.py")
attr.Set(scripts)
| 785 | Python | 29.230768 | 66 | 0.769427 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_13/docs/README.md | # Developer Office Hour - 01/13/2023
This is the sample code from the Developer Office Hour held on 01/13/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 3:46 - How do you programmatically add a Python Script to a prim? (Python Scripting Component)
- 8:02 - What are the criteria for valid prim and property names in USD?
- 11:14 - How can I swap a prim's material using Action Graph? | 479 | Markdown | 58.999993 | 114 | 0.76618 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_06_23/scripts/select_prims.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd

# Option 1: set the selection directly through the UsdContext selection API.
stage = omni.usd.get_context().get_stage()
ctx = omni.usd.get_context()
selection: omni.usd.Selection = ctx.get_selection()
# NOTE(review): second argument presumably controls expanding the selection
# in the Stage window — confirm against the Selection API docs.
selection.set_selected_prim_paths(["/World/Cube", "/World/Sphere"], False)

# Option 2: change the selection through the command system instead.
import omni.kit.commands
import omni.usd
ctx = omni.usd.get_context()
selection: omni.usd.Selection = ctx.get_selection()
omni.kit.commands.execute('SelectPrimsCommand',
    old_selected_paths=selection.get_selected_prim_paths(),
    new_selected_paths=["/World/Cone"],
    expand_in_stage=True)
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_06_23/docs/README.md | # Developer Office Hour - 06/23/2023
This is the sample code from the Developer Office Hour held on 06/23/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 03:50 - How do I programmatically select a prim?
- 16:00 - How do I reset the settings and preferences for a Kit app?
| 365 | Markdown | 44.749994 | 114 | 0.767123 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_27/scripts/sub_child_changes.py | # SPDX-License-Identifier: Apache-2.0
# UsdWatcher
from pxr import Sdf, Tf, Usd
import omni.usd
stage = omni.usd.get_context().get_stage()
def changed_paths(notice, stage):
    """Tf.Notice callback reporting changes to descendants of /World/Parent.

    ``stage`` is the sender the notice was registered against; it is not
    used here — filtering is done purely on the notified paths.
    """
    print("Change fired")
    prefix = "/World/Parent" + "/"
    # Attribute/metadata changes on existing objects.
    for changed in notice.GetChangedInfoOnlyPaths():
        if str(changed).startswith(prefix):
            print("Something happened to a descendent of /World/Parent")
            print(changed)
    # Structural changes: prims/properties added, removed, or re-composed.
    for resynced in notice.GetResyncedPaths():
        if str(resynced).startswith(prefix):
            print("A descendent of /World/Parent was added or removed")
            print(resynced)
objects_changed = Tf.Notice.Register(Usd.Notice.ObjectsChanged, changed_paths, None)
objects_changed.Revoke()
| 696 | Python | 26.879999 | 84 | 0.656609 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_01_27/docs/README.md | # Developer Office Hour - 01/27/2023
This is the sample code from the Developer Office Hour held on 01/27/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 3:14 - How do I listen for changes to the children of a certain prim?
- 15:00 - How do I rotate a prim using Action Graph?
| 372 | Markdown | 45.624994 | 114 | 0.760753 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/maticodes/doh_2022_07_22/extension.py | # SPDX-License-Identifier: Apache-2.0
import omni.ext
import omni.ui as ui
from omni.kit.widget.searchable_combobox import build_searchable_combo_widget
class MyWindow(ui.Window):
    """Demo window: a float slider bound to a model plus a searchable combo box."""

    def __init__(self, title: str = None, delegate=None, **kwargs):
        super().__init__(title, **kwargs)
        # Defer UI construction so the frame can rebuild itself on demand.
        self.frame.set_build_fn(self._build_window)

    def _build_window(self):
        """Build the window contents (called by the frame's build fn)."""
        with ui.ScrollingFrame():
            with ui.VStack(height=0):
                ui.Label("My Label")
                self.proj_scale_model = ui.SimpleFloatModel()
                # Keep the subscription object alive; losing the reference
                # cancels the value-changed callback.
                self.proj_scale_model_sub = (
                    self.proj_scale_model.subscribe_value_changed_fn(
                        self.slider_value_changed
                    )
                )
                ui.FloatSlider(model=self.proj_scale_model, min=0, max=100)

                def do_rebuild():
                    # Re-runs _build_window via the build fn set in __init__.
                    self.frame.rebuild()

                ui.Button("Rebuild", clicked_fn=do_rebuild)

                def clicked():
                    # Example showing how to retrieve the value from the model.
                    print(
                        f"Button Clicked! Slider Value: {self.proj_scale_model.as_float}"
                    )
                    self.proj_scale_model.set_value(1.0)

                ui.Button("Set Slider", clicked_fn=clicked)

                def on_combo_click_fn(model):
                    component = model.get_value_as_string()
                    print(f"{component} selected")

                # NOTE(review): index -1 presumably means "no initial index";
                # default_value selects "Kit" instead — confirm with the
                # searchable-combobox widget docs.
                component_list = ["Synthetic Data", "USD", "Kit", "UX", "UX / UI"]
                component_index = -1
                self._component_combo = build_searchable_combo_widget(
                    component_list,
                    component_index,
                    on_combo_click_fn,
                    widget_height=18,
                    default_value="Kit",
                )

    def slider_value_changed(self, model):
        # Example showing how to get the value when it changes.
        print("Slider Value:", model.as_float)

    def destroy(self) -> None:
        # Drop the model subscription before tearing the window down.
        del self.proj_scale_model_sub
        return super().destroy()
class MyExtension(omni.ext.IExt):
    """Extension entry point: owns a MyWindow for the extension's lifetime."""

    def on_startup(self, ext_id):
        # ext_id is the current extension id; it can be passed to the
        # extension manager to query e.g. the extension's location on disk.
        print(
            "[maticodes.doh_2022_07_22] Dev Office Hours Extension (2022-07-22) startup"
        )
        self._window = MyWindow("MyWindow", width=300, height=300)

    def on_shutdown(self):
        print(
            "[maticodes.doh_2022_07_22] Dev Office Hours Extension (2022-07-22) shutdown"
        )
        window = self._window
        if window:
            window.destroy()
            self._window = None
| 2,804 | Python | 34.961538 | 119 | 0.542083 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/maticodes/doh_2022_07_22/__init__.py | # SPDX-License-Identifier: Apache-2.0
from .extension import *
| 63 | Python | 20.333327 | 37 | 0.761905 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/cmds_more_params.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Gf, Usd
omni.kit.commands.execute('SetAnimCurveKey',
paths=['/World/toy_drummer.xformOp:translate'],
value=Gf.Vec3d(0.0, 0.0, 18))
omni.kit.commands.execute('SetAnimCurveKey',
paths=['/World/toy_drummer.xformOp:translate'],
value=Gf.Vec3d(0.0, 0.0, 24),
time=Usd.TimeCode(72))
| 365 | Python | 23.399998 | 48 | 0.731507 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/set_current_time.py | # SPDX-License-Identifier: Apache-2.0
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.timeline/docs/index.html
import omni.timeline
timeline = omni.timeline.get_timeline_interface()
# set in using seconds
timeline.set_current_time(1)
# set using frame number
fps = timeline.get_time_codes_per_seconds()
timeline.set_current_time(48 / fps)
| 360 | Python | 26.769229 | 90 | 0.775 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/reference_usdz.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Sdf
import omni.usd
omni.kit.commands.execute('CreateReference',
path_to=Sdf.Path('/World/toy_drummer2'),
asset_path='C:/Users/mcodesal/Downloads/toy_drummer.usdz',
usd_context=omni.usd.get_context())
| 287 | Python | 21.153845 | 59 | 0.759582 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/scripts/run_action_graph.py | # SPDX-License-Identifier: Apache-2.0
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.graph/docs/index.html
import omni.graph.core as og
keys = og.Controller.Keys
og.Controller.edit("/World/ActionGraph", { keys.SET_VALUES: ("/World/ActionGraph/on_impulse_event.state:enableImpulse", True) })
| 313 | Python | 33.888885 | 128 | 0.763578 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_07_22/docs/README.md | # Developer Office Hour - 07/22/2022
This is the sample code from the Developer Office Hour held on 07/22/2022, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I reference a USDZ file?
- How do I look up all of the parameters for a Kit Command?
- How do I set the current frame using Python?
- How do I execute an Action Graph using Python?
- How do I align radio buttons horizontally?
- How do I refresh a UI window?
- How do I customize the appearance of a slider?
| 563 | Markdown | 42.384612 | 114 | 0.760213 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_14/maticodes/doh_2022_10_14/extension.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.ext
import omni.ui as ui
class MyWindow(ui.Window):
    """Minimal example window containing a single button."""

    def __init__(self, title: str = None, **kwargs):
        super().__init__(title, **kwargs)
        # Build lazily so the frame can rebuild itself later.
        self.frame.set_build_fn(self._build_window)

    def _build_window(self):
        """Populate the window frame with one clickable button."""
        ui.Button("Click Me", clicked_fn=self._on_click)

    def _on_click(self):
        print("Hello")
class MyExtension(omni.ext.IExt):
    """Extension entry point: creates MyWindow on startup, tears it down on shutdown."""

    def on_startup(self, ext_id):
        carb.log_info("[maticodes.doh_2022_10_14] Dev Office Hours Extension (2022-10-14) startup")
        self._window = MyWindow("MyWindow", width=300, height=300)

    def on_shutdown(self):
        carb.log_info("[maticodes.doh_2022_10_14] Dev Office Hours Extension (2022-10-14) shutdown")
        window = self._window
        if window:
            window.destroy()
            self._window = None
| 848 | Python | 28.275861 | 100 | 0.621462 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_14/scripts/context_menu_inject.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.context_menu
def show_menu(objects):
print("show it?")
return True
def hello_world(objects):
print(f"Hello Objects: {objects}")
menu_item_config = {
"name": "Hello World!",
"glyph": "menu_search.svg",
"show_fn": [show_menu],
"onclick_fn": hello_world,
}
# You must keep a reference to the menu item. Set this variable to None to remove the item from the menu
hello_world_menu_item = omni.kit.context_menu.add_menu(menu_item_config, "MENU", "omni.kit.window.viewport")
hello_world_menu_item = None | 583 | Python | 26.809523 | 108 | 0.687822 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_14/docs/README.md | # Developer Office Hour - 10/14/2022
This is the sample code from the Developer Office Hour held on 10/14/2022, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- Why doesn't OV Code start?
- How do I inject my custom menu item into an existing context menu?
| 345 | Markdown | 42.249995 | 114 | 0.773913 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_08_25/scripts/add_sbsar.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
omni.kit.commands.execute('AddSbsarReferenceAndBindCommand', sbsar_path=r"C:\Users\mcodesal\Downloads\blueberry_skin.sbsar",
target_prim_path="/World/Sphere")
| 252 | Python | 35.142852 | 124 | 0.710317 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_08_25/scripts/get_selected_meshes.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
from pxr import Usd, UsdGeom

# Collect the currently selected prims that are UsdGeom.Mesh instances.
stage = omni.usd.get_context().get_stage()
selected_paths = omni.usd.get_context().get_selection().get_selected_prim_paths()
meshes = [
    candidate
    for candidate in (stage.GetPrimAtPath(path) for path in selected_paths)
    if candidate.IsA(UsdGeom.Mesh)
]
print("Selected meshes:")
print(meshes)
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_08_25/docs/README.md | # Developer Office Hour - 08/25/2023
This is the sample code from the Developer Office Hour held on 08/25/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 02:17 - How do I get just the mesh prims from my current selection?
- 08:26 - How do I programmatically add an SBSAR material to my USD Stage?
| 392 | Markdown | 48.124994 | 114 | 0.767857 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/toggle_hud.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.viewport.utility as okvu
okvu.toggle_global_visibility() | 112 | Python | 21.599996 | 40 | 0.794643 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/custom_attrs.py |
# SPDX-License-Identifier: Apache-2.0
# Docs: https://docs.omniverse.nvidia.com/prod_kit/prod_kit/programmer_ref/usd/properties/create-attribute.html
import omni.usd
from pxr import Usd, Sdf
stage = omni.usd.get_context().get_stage()
prim: Usd.Prim = stage.GetPrimAtPath("/World/Cylinder")
attr: Usd.Attribute = prim.CreateAttribute("mySecondAttr", Sdf.ValueTypeNames.Bool)
attr.Set(False)
| 394 | Python | 29.384613 | 111 | 0.769036 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/custom_global_data.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
stage = omni.usd.get_context().get_stage()
layer = stage.GetRootLayer()
print(type(layer))
layer.SetCustomLayerData({"Hello": "World"})
stage.DefinePrim("/World/Hello", "HelloWorld")
stage.DefinePrim("/World/MyTypeless") | 278 | Python | 22.249998 | 46 | 0.741007 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/make_unselectable.py | # SPDX-License-Identifier: Apache-2.0
# Docs: https://docs.omniverse.nvidia.com/kit/docs/omni.usd/latest/omni.usd/omni.usd.UsdContext.html#omni.usd.UsdContext.set_pickable
import omni.usd
ctx = omni.usd.get_context()
ctx.set_pickable("/", True) | 247 | Python | 29.999996 | 133 | 0.757085 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/scripts/change_viewport_camera.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.viewport.utility as vu
from pxr import Sdf
vp_api = vu.get_active_viewport()
vp_api.camera_path = "/World/Camera_01"
| 173 | Python | 20.749997 | 39 | 0.745665 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_28/docs/README.md | # Developer Office Hour - 04/28/2023
This is the sample code from the Developer Office Hour held on 04/28/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 02:18 - How do I make a prim unselectable in the viewport?
- 07:50 - How do I create custom properties?
- 26:00 - Can I write custom components for prims like in Unity?
- 50:21 - How can I toggle the stats heads up display on the viewport?
- 66:00 - How can you switch cameras on the active viewport?
| 550 | Markdown | 49.090905 | 114 | 0.754545 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_09_01/scripts/logging.py | # SPDX-License-Identifier: Apache-2.0
import logging
import carb

# Three ways to emit a message from a Kit extension:
logger = logging.getLogger()
print("Hello")  # plain stdout
carb.log_info("World")  # Carbonite logging
logger.info("Omniverse")  # standard Python logging via the root logger
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_09_01/scripts/xforming.py | # SPDX-License-Identifier: Apache-2.0
from pxr import UsdGeom
import omni.usd
stage = omni.usd.get_context().get_stage()
cube = stage.GetPrimAtPath("/World/Xform/Cube")
cube_xformable = UsdGeom.Xformable(cube)
transform = cube_xformable.GetLocalTransformation()
print(transform)
transform2 = cube_xformable.GetLocalTransformation()
print(transform2)
| 355 | Python | 21.249999 | 52 | 0.785915 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_09_01/docs/README.md | # Developer Office Hour - 09/01/2023
This is the sample code from the Developer Office Hour held on 09/01/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 7:27 - How do I validate my assets to make sure they are up-to-date with the latest USD specification?
| 352 | Markdown | 49.428564 | 114 | 0.775568 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_28/scripts/usd_watcher.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
from pxr import Gf
stage = omni.usd.get_context().get_stage()
cube = stage.GetPrimAtPath("/World/Cube")
def print_size(changed_path):
print("Size Changed:", changed_path)
def print_pos(changed_path):
    """Watcher callback: print the local translation of the changed prim.

    ``changed_path`` may be a prim path or a property path; property paths
    are reduced to their owning prim before the lookup.
    """
    print(changed_path)
    prim_path = changed_path if changed_path.IsPrimPath() else changed_path.GetPrimPath()
    target = stage.GetPrimAtPath(prim_path)
    local_transform = omni.usd.get_local_transform_SRT(target)
    # get_local_transform_SRT returns (scale, rotation, rot_order, translation).
    print("Translation: ", local_transform[3])
def print_world_pos(changed_path):
    """Watcher callback: print the world-space translation of the changed prim.

    Fix: the original ignored ``changed_path`` and always read the
    module-level ``prim`` (the cube captured at subscription time), so it
    reported the wrong prim for any other watched path. The prim is now
    resolved from the path that actually changed, the same way the
    sibling ``print_pos`` callback does.
    """
    prim_path = changed_path if changed_path.IsPrimPath() else changed_path.GetPrimPath()
    target_prim = stage.GetPrimAtPath(prim_path)
    world_transform: Gf.Matrix4d = omni.usd.get_world_transform_matrix(target_prim)
    translation: Gf.Vec3d = world_transform.ExtractTranslation()
    print(translation)
size_attr = cube.GetAttribute("size")
cube_sub = omni.usd.get_watcher().subscribe_to_change_info_path(cube.GetPath(), print_world_pos)
cube_size_sub = omni.usd.get_watcher().subscribe_to_change_info_path(size_attr.GetPath(), print_size)
cube_sub = None
cube_size_sub = None | 1,040 | Python | 31.531249 | 101 | 0.714423 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_28/scripts/extras.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
if prim:
print("Prim Exists")
from pxr import UsdGeom
# e.g., find all prims of type UsdGeom.Mesh
mesh_prims = [x for x in stage.Traverse() if x.IsA(UsdGeom.Mesh)]
mesh_prims = []
for x in stage.Traverse():
if x.IsA(UsdGeom.Mesh):
mesh_prims.append(x)
print(mesh_prims) | 432 | Python | 23.055554 | 65 | 0.685185 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_28/scripts/docking.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
my_window = ui.Window("Example Window", width=300, height=300)
with my_window.frame:
with ui.VStack():
f = ui.FloatField()
def clicked(f=f):
print("clicked")
f.model.set_value(f.model.get_value_as_float() + 1)
ui.Button("Plus One", clicked_fn=clicked)
my_window.dock_in_window("Property", ui.DockPosition.SAME)
ui.dock_window_in_window(my_window.title, "Property", ui.DockPosition.RIGHT, 0.2)
my_window.deferred_dock_in("Content", ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
| 548 | Python | 26.449999 | 81 | 0.717153 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_10_28/docs/README.md | # Developer Office Hour - 10/28/2022
This is the sample code from the Developer Office Hour held on 10/28/2022, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I dock my window?
- How do I subscribe to changes to a prim or attribute? (omni.usd.get_watcher())
| 355 | Markdown | 43.499995 | 114 | 0.76338 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/combobox_selected_item.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
my_window = ui.Window("Example Window", width=300, height=300)
combo_sub = None
options = ["One", "Two", "Three"]
with my_window.frame:
with ui.VStack():
combo_model: ui.AbstractItemModel = ui.ComboBox(0, *options).model
def combo_changed(item_model: ui.AbstractItemModel, item: ui.AbstractItem):
value_model = item_model.get_item_value_model(item)
current_index = value_model.as_int
option = options[current_index]
print(f"Selected '{option}' at index {current_index}.")
combo_sub = combo_model.subscribe_item_changed_fn(combo_changed)
def clicked():
value_model = combo_model.get_item_value_model()
current_index = value_model.as_int
option = options[current_index]
print(f"Button Clicked! Selected '{option}' at index {current_index}.")
ui.Button("Print Combo Selection", clicked_fn=clicked)
| 1,009 | Python | 35.071427 | 83 | 0.634291 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/use_tokens.py | # SPDX-License-Identifier: Apache-2.0
# https://docs.omniverse.nvidia.com/py/kit/docs/guide/tokens.html
import carb.tokens
from pathlib import Path
path = Path("${shared_documents}") / "maticodes.foo"
resolved_path = carb.tokens.get_tokens_interface().resolve(str(path))
print(resolved_path) | 293 | Python | 31.666663 | 69 | 0.761092 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/simple_instancer.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
from pxr import Usd, UsdGeom, Sdf, Gf
stage: Usd.Stage = omni.usd.get_context().get_stage()
prim_path = Sdf.Path("/World/MyInstancer")
instancer: UsdGeom.PointInstancer = UsdGeom.PointInstancer.Define(stage, prim_path)
proto_container = UsdGeom.Scope.Define(stage, prim_path.AppendPath("Prototypes"))
shapes = []
shapes.append(UsdGeom.Cube.Define(stage, proto_container.GetPath().AppendPath("Cube")))
shapes.append(UsdGeom.Sphere.Define(stage, proto_container.GetPath().AppendPath("Sphere")))
shapes.append(UsdGeom.Cone.Define(stage, proto_container.GetPath().AppendPath("Cone")))
instancer.CreatePositionsAttr([Gf.Vec3f(0, 0, 0), Gf.Vec3f(2, 0, 0), Gf.Vec3f(4, 0, 0)])
instancer.CreatePrototypesRel().SetTargets([shape.GetPath() for shape in shapes])
instancer.CreateProtoIndicesAttr([0, 1, 2]) | 852 | Python | 49.176468 | 91 | 0.761737 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/scripts/one_widget_in_container.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
my_window = ui.Window("Example Window", width=300, height=300)
with my_window.frame:
with ui.VStack():
with ui.CollapsableFrame():
with ui.VStack():
ui.FloatField()
ui.FloatField()
ui.Button("Button 1") | 328 | Python | 24.30769 | 62 | 0.588415 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_23/docs/README.md | # Developer Office Hour - 09/23/2022
This is the sample code from the Developer Office Hour held on 09/23/2022, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I query the selected item in a ui.ComboBox?
- How do I use Kit tokens?
- Why do I only see one widget in my UI container?
| 378 | Markdown | 41.111107 | 114 | 0.761905 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_09/maticodes/doh_2022_09_09/extension.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.ext
import omni.kit.commands
import omni.ui as ui
import omni.usd
from pxr import Gf, Sdf
# Check out: USDColorModel
# C:\ext_projects\omni-dev-office-hours\app\kit\exts\omni.example.ui\omni\example\ui\scripts\colorwidget_doc.py
class MyWindow(ui.Window):
def __init__(self, title: str = None, **kwargs):
super().__init__(title, **kwargs)
self._color_model = None
self._color_changed_subs = []
self._path_model = None
self._change_info_path_subscription = None
self._path_changed_sub = None
self._stage = omni.usd.get_context().get_stage()
self.frame.set_build_fn(self._build_window)
def _build_window(self):
with ui.ScrollingFrame():
with ui.VStack(height=0):
ui.Label("Property Path")
self._path_model = ui.StringField().model
self._path_changed_sub = self._path_model.subscribe_value_changed_fn(
self._on_path_changed
)
ui.Label("Color")
with ui.HStack(spacing=5):
self._color_model = ui.ColorWidget(width=0, height=0).model
for item in self._color_model.get_item_children():
component = self._color_model.get_item_value_model(item)
self._color_changed_subs.append(component.subscribe_value_changed_fn(self._on_color_changed))
ui.FloatField(component)
def _on_mtl_attr_changed(self, path):
color_attr = self._stage.GetAttributeAtPath(path)
color_model_items = self._color_model.get_item_children()
if color_attr:
color = color_attr.Get()
for i in range(len(color)):
component = self._color_model.get_item_value_model(color_model_items[i])
component.set_value(color[i])
def _on_path_changed(self, model):
if Sdf.Path.IsValidPathString(model.as_string):
attr_path = Sdf.Path(model.as_string)
color_attr = self._stage.GetAttributeAtPath(attr_path)
if color_attr:
self._change_info_path_subscription = omni.usd.get_watcher().subscribe_to_change_info_path(
attr_path,
self._on_mtl_attr_changed
)
def _on_color_changed(self, model):
values = []
for item in self._color_model.get_item_children():
component = self._color_model.get_item_value_model(item)
values.append(component.as_float)
if Sdf.Path.IsValidPathString(self._path_model.as_string):
attr_path = Sdf.Path(self._path_model.as_string)
color_attr = self._stage.GetAttributeAtPath(attr_path)
if color_attr:
color_attr.Set(Gf.Vec3f(*values[0:3]))
def destroy(self) -> None:
self._change_info_path_subscription = None
self._color_changed_subs = None
self._path_changed_sub = None
return super().destroy()
class MyExtension(omni.ext.IExt):
def on_startup(self, ext_id):
carb.log_info("[maticodes.doh_2022_09_09] Dev Office Hours Extension (2022-09-09) startup")
self._window = MyWindow("MyWindow", width=300, height=300)
def on_shutdown(self):
carb.log_info("[maticodes.doh_2022_09_09] Dev Office Hours Extension (2022-09-09) shutdown")
if self._window:
self._window.destroy()
self._window = None
| 3,584 | Python | 39.738636 | 117 | 0.588728 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_09/scripts/undo_group.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.kit.undo
# Requires Ctrl+Z twice to undo
omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
prim_type='Cube')
omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
prim_type='Cube')
# Grouped into one undo
with omni.kit.undo.group():
omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
prim_type='Cube')
omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
prim_type='Cube') | 506 | Python | 27.166665 | 63 | 0.76087 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_09/scripts/skip_undo_history.py | # SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.kit.primitive.mesh as mesh_cmds
# Creates history
omni.kit.commands.execute('CreateMeshPrimWithDefaultXform',
prim_type='Cube')
# Doesn't create history
mesh_cmds.CreateMeshPrimWithDefaultXformCommand("Cube").do()
| 293 | Python | 23.499998 | 60 | 0.798635 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_09_09/docs/README.md | # Developer Office Hour - 09/09/2022
This is the sample code from the Developer Office Hour held on 09/09/2022, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I connect a ColorWidget to the base color of a material at a specific path?
- How do I create an undo group?
- How do I avoid adding a command to the undo history?
| 420 | Markdown | 45.777773 | 114 | 0.769048 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_08_18/maticodes/doh_2023_08_18/extension.py | # SPDX-License-Identifier: Apache-2.0
import asyncio
from pathlib import Path
import carb
import omni.ext
import omni.ui as ui
import omni.usd
test_stage_path = Path(__file__).parent.parent.parent / "data" / "test_stage.usd"
class MyWindow(ui.Window):
def __init__(self, title: str = None, **kwargs):
super().__init__(title, **kwargs)
self.frame.set_build_fn(self._build_window)
def _build_window(self):
with ui.ScrollingFrame():
with ui.VStack(height=0):
ui.Label("My Label")
def clicked():
carb.log_info(test_stage_path)
# Synch option
# omni.usd.get_context().open_stage(str(test_stage_path))
# Async Option
# asyncio.ensure_future(self.open_stage())
# Async with Callback
omni.usd.get_context().open_stage_with_callback(str(test_stage_path), self.on_stage_open_finished)
ui.Button("Click Me", clicked_fn=clicked)
async def open_stage(self):
(result, error) = await omni.usd.get_context().open_stage_async(str(test_stage_path))
#Now that we've waited for the scene to open, we should be able to get the stage
stage = omni.usd.get_context().get_stage()
print (f"opened stage {stage} with result {result}")
def on_stage_open_finished(self, result: bool, path: str):
stage = omni.usd.get_context().get_stage()
print (f"opened stage {stage} with result {result}")
class MyExtension(omni.ext.IExt):
def on_startup(self, ext_id):
carb.log_info("[maticodes.doh_2023_08_18] Dev Office Hours Extension (2023-08-18) startup")
self._window = MyWindow("MyWindow", width=300, height=300)
def on_shutdown(self):
carb.log_info("[maticodes.doh_2023_08_18] Dev Office Hours Extension (2023-08-18) shutdown")
if self._window:
self._window.destroy()
self._window = None
| 2,025 | Python | 33.931034 | 118 | 0.599506 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_08_18/docs/README.md | # Developer Office Hour - 08/18/2023
This is the sample code from the Developer Office Hour held on 08/18/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 02:16 - How do I programmatically open a Stage in Kit?
| 304 | Markdown | 42.571422 | 114 | 0.773026 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/maticodes/doh_2022_08_26/extension.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.ext
class MyExtension(omni.ext.IExt):
def on_startup(self, ext_id):
carb.log_info("[maticodes.doh_2022_08_26] Dev Office Hours Extension (2022-08-26) startup")
def on_shutdown(self):
carb.log_info("[maticodes.doh_2022_08_26] Dev Office Hours Extension (2022-08-26) shutdown")
| 366 | Python | 27.230767 | 100 | 0.699454 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/maticodes/doh_2022_08_26/math.py | # SPDX-License-Identifier: Apache-2.0
def add(a, b):
return a + b | 71 | Python | 16.999996 | 37 | 0.633803 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/scripts/get_world_pos.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
from pxr import Usd, Gf
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Xform/Cube")
matrix:Gf.Matrix4d = omni.usd.get_world_transform_matrix(prim, time_code=Usd.TimeCode.Default())
translation = matrix.ExtractTranslation()
print(translation) | 328 | Python | 31.899997 | 96 | 0.771341 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/scripts/mouse_cursor_shape.py | # SPDX-License-Identifier: Apache-2.0
####################################
# MUST ENABLE omni.kit.window.cursor
####################################
import carb
import omni.kit.window.cursor
cursor = omni.kit.window.cursor.get_main_window_cursor()
# OPTIONS:
# carb.windowing.CursorStandardShape.ARROW
# carb.windowing.CursorStandardShape.IBEAM
# carb.windowing.CursorStandardShape.VERTICAL_RESIZE
# carb.windowing.CursorStandardShape.HORIZONTAL_RESIZE
# carb.windowing.CursorStandardShape.HAND
# carb.windowing.CursorStandardShape.CROSSHAIR
cursor.override_cursor_shape(carb.windowing.CursorStandardShape.CROSSHAIR)
# clear back to arrow
cursor.clear_overridden_cursor_shape() | 681 | Python | 31.476189 | 74 | 0.73862 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/scripts/refresh_combobox.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
def next_num(n):
while True:
yield n
n += 1
my_window = ui.Window("Example Window", width=300, height=300)
item_changed_sub = None
with my_window.frame:
with ui.VStack():
combo = ui.ComboBox(0, "Option1", "Option2", "Option3")
# I'm just using this to generate unique data
nums = next_num(0)
def clicked():
# clear the list
items = combo.model.get_item_children()
for item in items:
combo.model.remove_item(item)
# generate a new list
for x in range(5):
combo.model.append_child_item(None, ui.SimpleIntModel(next(nums)))
ui.Button("Regenerate Combo", clicked_fn=clicked) | 803 | Python | 27.714285 | 82 | 0.572852 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/scripts/focus_prim.py | # SPDX-License-Identifier: Apache-2.0
#######################################
# MUST ENABLE omni.kit.viewport.utility
#######################################
from omni.kit.viewport.utility import get_active_viewport, frame_viewport_selection
import omni.usd
# Select what you want to focus on
selection = omni.usd.get_selection()
selection.set_selected_prim_paths(["/World/Cube"], True)
# focus on selection
active_viewport = get_active_viewport()
if active_viewport:
frame_viewport_selection(active_viewport) | 517 | Python | 29.470587 | 83 | 0.65764 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/scripts/corner_rounding.py | # SPDX-License-Identifier: Apache-2.0
import omni.ui as ui
my_window = ui.Window("Example Window", width=300, height=300)
with my_window.frame:
with ui.VStack():
ui.Rectangle(style={
"background_color": ui.color(1.0, 0.0, 0.0),
"border_radius":20.0
})
ui.Rectangle(style={
"background_color": ui.color(0.0, 1.0, 0.0),
"border_radius":20.0,
"corner_flag": ui.CornerFlag.BOTTOM
})
ui.Rectangle(style={
"background_color": ui.color(0.0, 0.0, 1.0),
"border_radius":20.0,
"corner_flag": ui.CornerFlag.TOP
})
ui.Rectangle(style={
"background_color": ui.color(1.0, 0.0, 1.0),
"border_radius":20.0,
"corner_flag": ui.CornerFlag.TOP_RIGHT
}) | 834 | Python | 31.115383 | 62 | 0.526379 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_08_26/docs/README.md | # Developer Office Hour - 08/26/2022
This is the sample code from the Developer Office Hour held on 08/26/2022, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I execute arbitrary code from VSCode in Omniverse?
- How do I create omni.ui.Rectangle with only top or bottom rounded corners?
- How do I update a list of items in a ui.ComboBox without rebuilding the widget?
- How do I change the shape of the mouse cursor?
- How do I get the world position of a prim?
- What are the conventions for naming extensions?
- How do I add a custom window in the Window menu?
- How do I share Python code between extensions?
| 710 | Markdown | 49.785711 | 114 | 0.770423 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_05_26/scripts/get_references.py | # SPDX-License-Identifier: Apache-2.0
import omni.usd
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Xform")
refs = prim.GetMetadata("references").ApplyOperations([])
for ref in refs:
print(ref.primPath)
print(ref.assetPath)
| 260 | Python | 22.727271 | 57 | 0.742308 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_05_26/docs/README.md | # Developer Office Hour - 05/26/2023
This is the sample code from the Developer Office Hour held on 05/26/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 02:29 - How do I get an Omniverse app to pick up a new environment variable?
| 326 | Markdown | 45.714279 | 114 | 0.773006 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_07/scripts/current_frame_number.py | # SPDX-License-Identifier: Apache-2.0
import carb
import omni.timeline
itimeline = omni.timeline.get_timeline_interface()
current_seconds = itimeline.get_current_time()
fps = itimeline.get_time_codes_per_seconds()
current_frame = current_seconds * fps
print(f"Current Frame: {current_frame}") | 294 | Python | 28.499997 | 50 | 0.77551 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_07/scripts/get_local_transform.py | # SPDX-License-Identifier: Apache-2.0
from pxr import Gf, UsdGeom, Usd
import omni.usd
def decompose_matrix(mat: Gf.Matrix4d):
reversed_ident_mtx = reversed(Gf.Matrix3d())
translate = mat.ExtractTranslation()
scale = Gf.Vec3d(*(v.GetLength() for v in mat.ExtractRotationMatrix()))
#must remove scaling from mtx before calculating rotations
mat.Orthonormalize()
#without reversed this seems to return angles in ZYX order
rotate = Gf.Vec3d(*reversed(mat.ExtractRotation().Decompose(*reversed_ident_mtx)))
return translate, rotate, scale
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/Cube")
xform = UsdGeom.Xformable(prim)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
print(decompose_matrix(local_transformation))
def get_local_rot(prim: Usd.Prim):
return prim.GetAttribute("xformOp:rotateXYZ").Get()
print(get_local_rot(prim))
| 922 | Python | 31.964285 | 86 | 0.744035 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_07/scripts/sceneui_behavior.py | # SPDX-License-Identifier: Apache-2.0
from omni.kit.viewport.utility import get_active_viewport_window
import omni.ui as ui
from omni.kit.scripting import BehaviorScript
from omni.ui import scene as sc
from omni.ui import color as cl
class NewScript(BehaviorScript):
def on_init(self):
print(f"{__class__.__name__}.on_init()->{self.prim_path}")
self.viewport_window = get_active_viewport_window()
self.frame = self.viewport_window.get_frame("Super Duper Cool!")
def on_destroy(self):
print(f"{__class__.__name__}.on_destroy()->{self.prim_path}")
def test(self):
print("Hello")
def on_play(self):
print(f"{__class__.__name__}.on_play()->{self.prim_path}")
with self.frame:
# Create a default SceneView (it has a default camera-model)
self._scene_view = sc.SceneView()
# Add the manipulator into the SceneView's scene
with self._scene_view.scene:
sc.Rectangle(5, 5, thickness=2, wireframe=False, color=cl.red)
# Register the SceneView with the Viewport to get projection and view updates
self.viewport_window.viewport_api.add_scene_view(self._scene_view)
def on_pause(self):
print(f"{__class__.__name__}.on_pause()->{self.prim_path}")
def on_stop(self):
print(f"{__class__.__name__}.on_stop()->{self.prim_path}")
if self._scene_view:
# Empty the SceneView of any elements it may have
self._scene_view.scene.clear()
# Be a good citizen, and un-register the SceneView from Viewport updates
if self.viewport_window:
self.viewport_window.viewport_api.remove_scene_view(self._scene_view)
# Remove our references to these objects
self._scene_view = None
self.frame.destroy()
def on_update(self, current_time: float, delta_time: float):
# print(f"{__class__.__name__}.on_update(current_time={current_time}, delta_time={delta_time})->{self.prim_path}")
pass
| 2,069 | Python | 36.636363 | 122 | 0.618173 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_04_07/docs/README.md | # Developer Office Hour - 04/07/2023
This is the sample code from the Developer Office Hour held on 04/07/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- Where can I find extension API documentation?
- How do I get the current frame number?
| 336 | Markdown | 41.124995 | 114 | 0.779762 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_05_05/scripts/omnilive.py | # SPDX-License-Identifier: Apache-2.0
from omni.kit.usd.layers import get_live_syncing
import omni.client
import asyncio
UNIQUE_SESSION_NAME = "my_unique_session"
# Get the interface
live_syncing = get_live_syncing()
# Create a session
live_session = live_syncing.create_live_session(UNIQUE_SESSION_NAME)
# Simulate joining a session
for session in live_syncing.get_all_live_sessions():
if session.name == UNIQUE_SESSION_NAME:
live_syncing.join_live_session(session)
break
# Merge changes into base layer and disconnect from live session
loop = asyncio.get_event_loop()
loop.create_task(live_syncing.merge_and_stop_live_session_async(layer_identifier=session.base_layer_identifier))
# Disconnect from live session without merging. When you reconnect, changes will still be in your live session.
live_syncing.stop_live_session(session.base_layer_identifier)
# Delete the session once you're all done. You can add a callback for the second arg to know if completed.
omni.client.delete_with_callback(session.url, lambda: None) | 1,051 | Python | 35.275861 | 112 | 0.7745 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_05_05/scripts/change_renderer.py | # SPDX-License-Identifier: Apache-2.0
# RTX Path Tracing
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("omni.kit.viewport.actions", "set_renderer_rtx_pathtracing")
action.execute()
# RTX Real-Time
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("omni.kit.viewport.actions", "set_renderer_rtx_realtime")
action.execute()
# Storm
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("omni.kit.viewport.actions", "set_renderer_pxr_storm")
action.execute()
# Iray
import omni.kit.actions.core
action_registry = omni.kit.actions.core.get_action_registry()
action = action_registry.get_action("omni.kit.viewport.actions", "set_renderer_iray")
action.execute() | 891 | Python | 34.679999 | 96 | 0.776655 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_05_05/scripts/change_variants.py | # SPDX-License-Identifier: Apache-2.0
from pxr import Usd, UsdGeom
import omni.usd
stage = omni.usd.get_context().get_stage()
world_prim = stage.GetPrimAtPath("/World")
vset = world_prim.GetVariantSets().AddVariantSet('shapes')
vset.AddVariant('cube')
vset.AddVariant('sphere')
vset.AddVariant('cone')
vset.SetVariantSelection('cube')
with vset.GetVariantEditContext():
UsdGeom.Cube.Define(stage, "/World/Cube")
vset.SetVariantSelection('sphere')
with vset.GetVariantEditContext():
UsdGeom.Sphere.Define(stage, "/World/Sphere")
vset.SetVariantSelection('cone')
with vset.GetVariantEditContext():
UsdGeom.Cone.Define(stage, "/World/Cone")
stage = omni.usd.get_context().get_stage()
world_prim = stage.GetPrimAtPath("/World")
vsets = world_prim.GetVariantSets()
vset = vsets.GetVariantSet("shapes")
vset.SetVariantSelection("cone") | 848 | Python | 27.299999 | 58 | 0.755896 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2023_05_05/docs/README.md | # Developer Office Hour - 05/05/2023
This is the sample code from the Developer Office Hour held on 05/05/2023, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- 02:05 - How can I programmatically create and join an OmniLive session?
- 14:17 - How can I programmatically change the renderer to RTX Real-Time, RTX Path Tracing, Iray, or Storm?
| 430 | Markdown | 52.874993 | 114 | 0.774419 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_12_09/scripts/screenshot_viewport.py | # SPDX-License-Identifier: Apache-2.0
from omni.kit.viewport.utility import get_active_viewport, capture_viewport_to_file
vp_api = get_active_viewport()
capture_viewport_to_file(vp_api, r"C:\temp\screenshot.png") | 214 | Python | 34.833328 | 83 | 0.775701 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_12_09/scripts/collect_asset.py | # SPDX-License-Identifier: Apache-2.0
import asyncio
from omni.kit.tool.collect.collector import Collector
async def collect_async(input_usd, output_dir, usd_only, flat_collection, mtl_only, prog_cb, finish_cb):
"""Collect input_usd related assets to output_dir.
Args:
input_usd (str): The usd stage to be collected.
output_dir (str): The target dir to collect the usd stage to.
usd_only (bool): Collects usd files only or not. It will ignore all asset types.
flat_collection (bool): Collects stage without keeping the original dir structure.
mtl_only (bool): Collects material and textures only or not. It will ignore all other asset types.
prog_cb: Progress callback function
finish_cb: Finish callback function
"""
collector = Collector(input_usd, output_dir, usd_only, flat_collection, mtl_only)
await collector.collect(prog_cb, finish_cb)
def finished():
print("Finished!")
asyncio.ensure_future(collect_async(r"C:\temp\bookcase.usd", r"C:\temp\test_collect",
False, False, False, None, finished)) | 1,095 | Python | 42.839998 | 106 | 0.70411 |
mati-nvidia/developer-office-hours/exts/maticodes.doh_2022_12_09/docs/README.md | # Developer Office Hour - 12/09/2022
This is the sample code from the Developer Office Hour held on 12/09/2022, Mati answered some developer questions
from the NVIDIA Omniverse forums regarding Kit, Omniverse Code, Python, and USD.
## Questions
- How do I Collect Asset using Python to share a full Omniverse project?
- How do I capture a screenshot of the viewport?
- How do I change the visibility of a prim using Action Graph?
| 432 | Markdown | 47.111106 | 114 | 0.777778 |
Subsets and Splits