file_path
stringlengths 21
207
| content
stringlengths 5
1.02M
| size
int64 5
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
NVIDIA-Omniverse/IsaacSim-Automator/src/ansible/roles/rdesktop/tasks/main.yml | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
---
# check if we need to skip stuff
- name: Check installed services
  service_facts:

# note: explicit module args instead of the legacy "apt: |" free-form
# block scalar, which depended on k=v parsing of a literal string
- name: Prerequisites (1)
  apt:
    name: "{{ item }}"
    state: latest
    update_cache: yes
    install_recommends: no
  with_items:
    - ubuntu-desktop
    - python3-pip

# install only if ubuntu 20
- name: Prerequisites (2)
  apt:
    name: yaru-theme-gtk
    state: latest
  when: ansible_distribution_release == "focal"

- name: Configure desktop environment
  import_tasks: desktop.yml

- name: Virtual display
  import_tasks: virtual-display.yml

# updates bus id of the gpu in the xorg.conf file
# needed for starting from the image without ansible
- name: Bus ID updater
  import_tasks: busid.yml

# install misc utils
- name: Misc utils
  import_tasks: utils.yml

# install visual studio code
- name: Visual Studio Code
  import_tasks: vscode.yml
  tags:
    - __vscode

# VNC
- name: VNC server
  import_tasks: vnc.yml

# NoMachine
- name: NoMachine server
  import_tasks: nomachine.yml
  when: "'nxserver.service' not in ansible_facts.services"
  tags:
    - skip_in_ovami

# NoVNC
- name: NoVNC server
  import_tasks: novnc.yml

# do reboots if needed
- name: Reboot if needed
  meta: flush_handlers
| 1,808 | YAML | 22.493506 | 74 | 0.724004 |
NVIDIA-Omniverse/IsaacSim-Automator/src/ansible/roles/rdesktop/tasks/desktop.yml | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
---
#
# Good desktop experience
#

- name: Configure auto login [1]
  lineinfile:
    path: /etc/gdm3/custom.conf
    state: present
    line: "AutomaticLoginEnable=true"
    insertafter: "\\[daemon\\]"
  notify: reboot

- name: Configure auto login [2]
  lineinfile:
    path: /etc/gdm3/custom.conf
    state: present
    line: "AutomaticLogin=ubuntu"
    insertafter: "\\[daemon\\]"
  notify: reboot

# disable blank screen
- name: Mask sleep targets
  shell: systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target
  notify: reboot

# disable screen lock
- name: Disable screen lock
  shell: "{{ item }}"
  with_items:
    - gsettings set org.gnome.desktop.session idle-delay 0
    - gsettings set org.gnome.desktop.screensaver lock-enabled 'false'
    - gsettings set org.gnome.desktop.lockdown disable-lock-screen 'true'
    - gsettings set org.gnome.desktop.screensaver idle-activation-enabled 'false'
    - gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-battery-timeout 0
  become_user: "{{ ansible_user }}"
  notify: reboot

# increase font size
- name: Set font size to 125%
  shell: gsettings set org.gnome.desktop.interface text-scaling-factor 1.25
  become_user: "{{ ansible_user }}"

# enable dark theme
- name: Make it dark
  shell: gsettings set org.gnome.desktop.interface gtk-theme 'Yaru-dark'
  become_user: "{{ ansible_user }}"

# fix terminal font
- name: Fix terminal font
  shell: "{{ item }}"
  become_user: "{{ ansible_user }}"
  with_items:
    - gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$(gsettings get org.gnome.Terminal.ProfilesList default|tr -d \')/ use-system-font false
    - gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$(gsettings get org.gnome.Terminal.ProfilesList default|tr -d \')/ font "Monospace 12"

# make terminal semi-transparent
- name: Make terminal semi-transparent
  shell: "{{ item }}"
  become_user: "{{ ansible_user }}"
  with_items:
    - gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$(gsettings get org.gnome.Terminal.ProfilesList default|tr -d \')/ background-transparency-percent 12
    - gsettings set org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$(gsettings get org.gnome.Terminal.ProfilesList default|tr -d \')/ use-transparent-background true

# disable new ubuntu version prompt
- name: Disable new ubuntu version prompt
  lineinfile:
    path: /etc/update-manager/release-upgrades
    regexp: "Prompt=.*"
    line: "Prompt=never"
  notify: reboot
| 3,223 | YAML | 35.224719 | 193 | 0.730686 |
NVIDIA-Omniverse/IsaacSim-Automator/src/ansible/roles/rdesktop/defaults/main.yml | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
---
# Default VNC password placeholder; expected to be overridden per deployment
# (see the vnc_password variable passed in by the deploy tooling).
vnc_password: empty
| 638 | YAML | 32.631577 | 74 | 0.761755 |
NVIDIA-Omniverse/IsaacSim-Automator/src/ansible/roles/rdesktop/meta/main.yml | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
---
# Galaxy metadata for the rdesktop role (remote desktop access).
galaxy_info:
  role_name: rdesktop
  author: NVIDIA Corporation
  description: Enables remote desktop access
  standalone: false

# roles that must run before this one
dependencies:
  - { role: system }
  - { role: nvidia }
| 803 | YAML | 29.923076 | 74 | 0.750934 |
NVIDIA-Omniverse/IsaacSim-Automator/src/packer/aws/README.md | # Running packer for AWS
Set env vars with credentials:
```sh
export AWS_ACCESS_KEY_ID="..."
export AWS_SECRET_ACCESS_KEY="..."
export NGC_API_KEY="..." # optional
```
Alternatively you can pass them as variables in the packer command (`packer -var=varname=value...`).
Then launch image builds with:
```sh
packer build [-force] [-var=aws_region="us-east-1"] [-var=image_name="..."] [-var=system_user_password="..."] [-var=vnc_password="..."] <folder>/
```
For example:
```sh
packer build -force -var=isaac_image="nvcr.io/nvidia/isaac-sim:2023.1.0-hotfix.1" /app/src/packer/aws/isaac
```
```sh
packer build -force \
-var=aws_region="us-east-1" \
-var=image_name=ovami-test-1 \
-var=system_user_password="nvidia123" \
-var=vnc_password="nvidia123" \
/app/src/packer/aws/ovami
```
| 788 | Markdown | 22.90909 | 145 | 0.673858 |
NVIDIA-Omniverse/IsaacSim-Automator/src/packer/azure/README.md | # Running packer for Azure
See <https://learn.microsoft.com/en-us/azure/virtual-machines/linux/build-image-with-packer>
## 1. Create resource group for packer output
```sh
az group create -n isa.packer -l westus3
```
## 2. Create Azure service principal
```sh
export AZURE_SUBSCRIPTION_ID=`az account list | jq -r .[0].id`
az ad sp create-for-rbac \
--role Contributor \
--scopes /subscriptions/$AZURE_SUBSCRIPTION_ID \
--query "{ client_id: appId, client_secret: password, tenant_id: tenant }"
```
## 3. Build image
Set env vars with credentials:
```sh
export AZURE_SUBSCRIPTION_ID="..."
export AZURE_TENANT_ID="..."
export AZURE_SP_CLIENT_ID="..."
export AZURE_SP_CLIENT_SECRET="..."
export NGC_API_KEY="..."
```
Alternatively you can pass them as variables in the packer command (`packer -var=varname=value...`).
Then launch image builds with:
```sh
packer build [-var=image_name=...] <folder>/
```
For example:
```sh
packer build isaac/
packer build -var=image_name=my_image_1 isaac/
```
| 1,011 | Markdown | 20.531914 | 100 | 0.693373 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/config.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
from typing import Any, Dict

# Global configuration dictionary shared by the deployment tooling.
c: Dict[str, Any] = {}

# paths
c["app_dir"] = "/app"
c["state_dir"] = "/app/state"
c["results_dir"] = "/app/results"
c["uploads_dir"] = "/app/uploads"
c["tests_dir"] = "/app/src/tests"
c["ansible_dir"] = "/app/src/ansible"
c["terraform_dir"] = "/app/src/terraform"

# app image name
c["app_image_name"] = "isa"

# gcp driver
# @see https://cloud.google.com/compute/docs/gpus/grid-drivers-table
c["gcp_driver_url"] = (
    "https://storage.googleapis.com/nvidia-drivers-us-public/GRID/vGPU16.2/NVIDIA-Linux-x86_64-535.129.03-grid.run"
)

# aws/alicloud driver
c["generic_driver_apt_package"] = "nvidia-driver-535-server"

# default remote dirs (also the defaults for the --remote-dir options;
# previously these were assigned twice with identical values — deduplicated)
c["default_remote_uploads_dir"] = "/home/ubuntu/uploads"
c["default_remote_results_dir"] = "/home/ubuntu/results"
c["default_remote_workspace_dir"] = "/home/ubuntu/workspace"

# defaults
# --isaac-image
c["default_isaac_image"] = "nvcr.io/nvidia/isaac-sim:2023.1.1"

# --ssh-port
c["default_ssh_port"] = 22

# --from-image
c["azure_default_from_image"] = False
c["aws_default_from_image"] = False

# --omniverse-user
c["default_omniverse_user"] = "omniverse"

# --isaac-instance-type
c["aws_default_isaac_instance_type"] = "g5.2xlarge"
# str, 1-index in DeployAzureCommand.AZURE_OVKIT_INSTANCE_TYPES
c["azure_default_isaac_instance_type"] = "2"
c["gcp_default_isaac_instance_type"] = "g2-standard-8"
c["alicloud_default_isaac_instance_type"] = "ecs.gn7i-c16g1.4xlarge"

# --isaac-gpu-count
c["gcp_default_isaac_gpu_count"] = 1

# --region
c["alicloud_default_region"] = "us-east-1"

# --prefix for the created cloud resources
c["default_prefix"] = "isa"

# --oige
c["default_oige_git_checkpoint"] = "main"

# --orbit
c["default_orbit_git_checkpoint"] = "devel"
| 2,478 | Python | 27.494253 | 115 | 0.705408 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/aws.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
"""
Utils for AWS
"""
from src.python.utils import read_meta, shell_command
def aws_configure_cli(
    deployment_name,
    verbose=False,
):
    """
    Configure the AWS CLI using credentials saved in the deployment's meta file.
    """
    params = read_meta(deployment_name)["params"]

    # settings are applied in this order: key id, secret key, region
    settings = {
        "aws_access_key_id": params["aws_access_key_id"],
        "aws_secret_access_key": params["aws_secret_access_key"],
        "region": params["region"],
    }

    for option, value in settings.items():
        shell_command(
            f"aws configure set {option} '{value}'",
            verbose=verbose,
            exit_on_error=True,
            capture_output=True,
        )
def aws_stop_instance(instance_id, verbose=False):
    """Stop the given EC2 instance via the AWS CLI."""
    cmd = f"aws ec2 stop-instances --instance-ids '{instance_id}'"
    shell_command(
        cmd,
        verbose=verbose,
        exit_on_error=True,
        capture_output=True,
    )
def aws_start_instance(instance_id, verbose=False):
    """Start the given EC2 instance via the AWS CLI."""
    cmd = f"aws ec2 start-instances --instance-ids '{instance_id}'"
    shell_command(
        cmd,
        verbose=verbose,
        exit_on_error=True,
        capture_output=True,
    )
def aws_get_instance_status(instance_id, verbose=False):
    """
    Query instance status
    Returns: "stopping" | "stopped" | "pending" | "running"
    """
    # describe-instances piped through jq to extract the state name
    query = (
        f"aws ec2 describe-instances --instance-ids '{instance_id}'"
        + " | jq -r .Reservations[0].Instances[0].State.Name"
    )

    result = shell_command(
        query,
        verbose=verbose,
        exit_on_error=True,
        capture_output=True,
    )

    return result.stdout.decode().strip()
| 2,500 | Python | 25.606383 | 77 | 0.632 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/debug.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
import debugpy
# module-level flag: the debugpy listener must only be started once
__debug_started = False


def debug_start():
    """Start the debugpy listener (once) and block until a client attaches."""
    global __debug_started

    if __debug_started:
        return

    debugpy.listen(("0.0.0.0", 5678))
    print("Waiting for debugger to attach...")
    debugpy.wait_for_client()
    print("Debugger attached.")
    __debug_started = True
def debug_break():
    # Ensure the debug server is running (and a client is attached),
    # then trigger a programmatic breakpoint at the call site.
    debug_start()
    debugpy.breakpoint()
| 988 | Python | 27.257142 | 74 | 0.705466 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/ngc.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
import pathlib
import subprocess
# directory containing this file (the ngc_check.expect helper lives next to it)
SELF_DIR = pathlib.Path(__file__).parent.resolve()


def check_ngc_access(ngc_api_key, org="", team="", verbose=False):
    """
    Checks if NGC API key is valid and user has access to DRIVE Sim.

    Returns:
    - 0 - all is fine
    - 100 - invalid api key
    - 101 - accepted below but undocumented; presumably "user is not in the
      org" — TODO confirm against ngc_check.expect
    - 102 - user is not in the team
    """
    # delegate the actual login attempt to the expect script; it encodes
    # its result in the exit code
    proc = subprocess.run(
        [f"{SELF_DIR}/ngc_check.expect", ngc_api_key, org, team],
        capture_output=not verbose,
        timeout=60,
    )

    # any other exit code means the check itself failed, not the key
    if proc.returncode not in [0, 100, 101, 102]:
        raise RuntimeError(
            f"Error checking NGC API Key. Return code: {proc.returncode}"
        )

    return proc.returncode
| 1,301 | Python | 27.304347 | 74 | 0.680246 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/alicloud.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
"""
Utils for AliCloud
"""
from src.python.utils import read_meta, shell_command
def alicloud_configure_cli(
    deployment_name,
    verbose=False,
):
    """
    Configure the Aliyun CLI from credentials in the deployment's meta file.
    """
    params = read_meta(deployment_name)["params"]

    command = (
        "aliyun configure set "
        + f"--access-key-id '{params['aliyun_access_key']}'"
        + f" --access-key-secret '{params['aliyun_secret_key']}'"
        + f" --region '{params['region']}'"
    )

    shell_command(
        command,
        verbose=verbose,
        exit_on_error=True,
        capture_output=True,
    )
def alicloud_start_instance(vm_id, verbose=False):
    """Start the given ECS instance via the Aliyun CLI."""
    cmd = f"aliyun ecs StartInstance --InstanceId '{vm_id}'"
    shell_command(
        cmd,
        verbose=verbose,
        exit_on_error=True,
        capture_output=True,
    )
def alicloud_stop_instance(vm_id, verbose=False):
    """Stop the given ECS instance via the Aliyun CLI."""
    cmd = f"aliyun ecs StopInstance --InstanceId '{vm_id}'"
    shell_command(
        cmd,
        verbose=verbose,
        exit_on_error=True,
        capture_output=True,
    )
def alicloud_get_instance_status(vm_id, verbose=False):
    """
    Query VM status
    Returns: "Stopping" | "Stopped" | "Starting" | "Running"
    """
    # DescribeInstances piped through jq to pull out the status field
    query = (
        f"aliyun ecs DescribeInstances --InstanceIds '[\"{vm_id}\"]'"
        + " | jq -r .Instances.Instance[0].Status"
    )

    result = shell_command(
        query,
        verbose=verbose,
        exit_on_error=True,
        capture_output=True,
    )

    return result.stdout.decode().strip()
def alicloud_list_regions(
    aliyun_access_key,
    aliyun_secret_key,
    verbose=False,
):
    """
    List available region ids.

    Returns: list of region id strings.
    """
    # quote the credentials like every other aliyun helper in this module does,
    # so keys containing shell metacharacters cannot break the command
    res = (
        shell_command(
            f"aliyun --access-key-id '{aliyun_access_key}'"
            + f" --access-key-secret '{aliyun_secret_key}'"
            + " --region cn-beijing ecs DescribeRegions"
            + " | jq -r '.Regions.Region[].RegionId'",
            capture_output=True,
            exit_on_error=True,
            verbose=verbose,
        )
        .stdout.decode()
        .strip()
    )

    valid_regions = res.split("\n")
    return valid_regions
| 2,886 | Python | 23.675213 | 74 | 0.596674 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/deployer.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
import json
import os
import re
import shlex
import sys
from pathlib import Path
import click
from src.python.utils import (
colorize_error,
colorize_info,
colorize_prompt,
colorize_result,
read_meta,
shell_command,
)
from src.python.debug import debug_break # noqa
from src.python.ngc import check_ngc_access
class Deployer:
    def __init__(self, params, config):
        # cache for Terraform outputs, lazily filled by tf_output()
        self.tf_outputs = {}
        self.params = params
        self.config = config
        self.existing_behavior = None

        # save original params so we can recreate command line
        self.input_params = params.copy()

        # convert "in_china" tri-state ("yes"/"no"/"auto") to a bool;
        # NOTE(review): "auto" maps to False here — confirm whether
        # auto-detection happens elsewhere before this runs
        self.params["in_china"] = {"yes": True, "no": False, "auto": False}[
            self.params["in_china"]
        ]

        # create state directory if it doesn't exist
        os.makedirs(self.config["state_dir"], exist_ok=True)

        # print complete command line
        if self.params["debug"]:
            click.echo(colorize_info("* Command:\n" + self.recreate_command_line()))
    def __del__(self):
        # update meta info
        # NOTE(review): persisting from __del__ is best-effort only — during
        # interpreter shutdown the modules save_meta() relies on may already
        # be torn down
        self.save_meta()
def save_meta(self):
"""
Save command parameters in json file, just in case
"""
meta_file = (
f"{self.config['state_dir']}/{self.params['deployment_name']}/meta.json"
)
data = {
"command": self.recreate_command_line(separator=" "),
"input_params": self.input_params,
"params": self.params,
"config": self.config,
}
Path(meta_file).parent.mkdir(parents=True, exist_ok=True)
Path(meta_file).write_text(json.dumps(data, indent=4))
if self.params["debug"]:
click.echo(colorize_info(f"* Meta info saved to '{meta_file}'"))
    def read_meta(self):
        # load this deployment's previously saved meta.json
        # (delegates to src.python.utils.read_meta)
        return read_meta(
            self.params["deployment_name"],
            self.params["debug"],
        )
def recreate_command_line(self, separator=" \\\n"):
"""
Recreate command line
"""
command_line = sys.argv[0]
for k, v in self.input_params.items():
k = k.replace("_", "-")
if isinstance(v, bool):
if v:
command_line += separator + "--" + k
else:
not_prefix = "--no-"
if k in ["from-image"]:
not_prefix = "--not-"
command_line += separator + not_prefix + k
else:
command_line += separator + "--" + k + " "
if isinstance(v, str):
command_line += "'" + shlex.quote(v) + "'"
else:
command_line += str(v)
return command_line
def ask_existing_behavior(self):
"""
Ask what to do if deployment already exists
"""
deployment_name = self.params["deployment_name"]
existing = self.params["existing"]
self.existing_behavior = existing
if existing == "ask" and os.path.isfile(
f"{self.config['state_dir']}/{deployment_name}/.tfvars"
):
self.existing_behavior = click.prompt(
text=colorize_prompt(
"* Deploymemnt exists, what would you like to do? See --help for details."
),
type=click.Choice(["repair", "modify", "replace", "run_ansible"]),
default="replace",
)
if (
self.existing_behavior == "repair"
or self.existing_behavior == "run_ansible"
):
# restore params from meta file
r = self.read_meta()
self.params = r["params"]
click.echo(
colorize_info(
f"* Repairing existing deployment \"{self.params['deployment_name']}\"..."
)
)
# update meta info (with new value for existing_behavior)
self.save_meta()
# destroy existing deployment``
if self.existing_behavior == "replace":
debug = self.params["debug"]
click.echo(colorize_info("* Deleting existing deployment..."))
shell_command(
command=f'{self.config["app_dir"]}/destroy "{deployment_name}" --yes'
+ f' {"--debug" if debug else ""}',
verbose=debug,
)
# update meta info if deployment was destroyed
self.save_meta()
    def validate_ngc_api_key(self, image, restricted_image=False):
        """
        Check if NGC API key allows to log in and has access to appropriate NGC image
        @param image: NGC image to check access to
        @param restricted_image: If image is restricted to specific org/team?
        """
        debug = self.params["debug"]
        ngc_api_key = self.params["ngc_api_key"]
        ngc_api_key_check = self.params["ngc_api_key_check"]

        # extract org and team from the image path
        # (expects "nvcr.io/<org>/[<team>/]<image>:<tag>")
        r = re.findall(
            "^nvcr\\.io/([a-z0-9\\-_]+)/([a-z0-9\\-_]+/)?[a-z0-9\\-_]+:[a-z0-9\\-_.]+$",
            image,
        )
        ngc_org, ngc_team = r[0]
        ngc_team = ngc_team.rstrip("/")

        # access to images in the public "nvidia" org cannot be verified here
        if ngc_org == "nvidia":
            click.echo(
                colorize_info(
                    "* Access to docker image can't be checked for NVIDIA org. But you'll be fine. Fingers crossed."
                )
            )
            return

        if debug:
            click.echo(colorize_info(f'* Will check access to NGC Org: "{ngc_org}"'))
            click.echo(colorize_info(f'* Will check access to NGC Team: "{ngc_team}"'))

        # skip the check entirely if disabled or no key was given
        if ngc_api_key_check and ngc_api_key != "none":
            click.echo(colorize_info("* Validating NGC API key... "))

            # return codes: 0 ok, 100 invalid key, 101/102 no org/team access
            # (see src/python/ngc.py)
            r = check_ngc_access(
                ngc_api_key=ngc_api_key, org=ngc_org, team=ngc_team, verbose=debug
            )

            if r == 100:
                raise Exception(colorize_error("NGC API key is invalid."))
            # only check access to org/team if restricted image is deployed
            elif restricted_image and (r == 101 or r == 102):
                raise Exception(
                    colorize_error(
                        f'NGC API key is valid but you don\'t have access to image "{image}".'
                    )
                )

            click.echo(colorize_info(("* NGC API Key is valid!")))
def create_tfvars(self, tfvars: dict = {}):
"""
- Check if deployment with this deployment_name exists and deal with it
- Create/update tfvars file
Expected values for "existing_behavior" arg:
- repair: keep tfvars/tfstate, don't ask for user input
- modify: keep tfstate file, update tfvars file with user input
- replace: delete tfvars/tfstate files
- run_ansible: keep tfvars/tfstate, don't ask for user input, skip terraform steps
"""
# default values common for all clouds
tfvars.update(
{
"isaac_enabled": self.params["isaac"]
if "isaac" in self.params
else False,
#
"isaac_instance_type": self.params["isaac_instance_type"]
if "isaac_instance_type" in self.params
else "none",
#
"prefix": self.params["prefix"],
"ssh_port": self.params["ssh_port"],
#
"from_image": self.params["from_image"]
if "from_image" in self.params
else False,
#
"deployment_name": self.params["deployment_name"],
}
)
debug = self.params["debug"]
deployment_name = self.params["deployment_name"]
# deal with existing deployment:
tfvars_file = f"{self.config['state_dir']}/{deployment_name}/.tfvars"
tfstate_file = f"{self.config['state_dir']}/{deployment_name}/.tfstate"
# tfvars
if os.path.exists(tfvars_file):
if (
self.existing_behavior == "modify"
or self.existing_behavior == "overwrite"
):
os.remove(tfvars_file)
if debug:
click.echo(colorize_info(f'* Deleted "{tfvars_file}"...'))
# tfstate
if os.path.exists(tfstate_file):
if self.existing_behavior == "overwrite":
os.remove(tfstate_file)
if debug:
click.echo(colorize_info(f'* Deleted "{tfstate_file}"...'))
# create tfvars file
if (
self.existing_behavior == "modify"
or self.existing_behavior == "overwrite"
or not os.path.exists(tfvars_file)
):
self._write_tfvars_file(path=tfvars_file, tfvars=tfvars)
def _write_tfvars_file(self, path: str, tfvars: dict):
"""
Write tfvars file
"""
debug = self.params["debug"]
if debug:
click.echo(colorize_info(f'* Created tfvars file "{path}"'))
# create <dn>/ directory if it doesn't exist
Path(path).parent.mkdir(parents=True, exist_ok=True)
with open(path, "w") as f:
for key, value in tfvars.items():
# convert booleans to strings
if isinstance(value, bool):
value = {
True: "true",
False: "false",
}[value]
# format key names
key = key.replace("-", "_")
# write values
if isinstance(value, str):
value = value.replace('"', '\\"')
f.write(f'{key} = "{value}"\n')
elif isinstance(value, list):
f.write(f"{key} = " + str(value).replace("'", '"') + "\n")
else:
f.write(f"{key} = {value}\n")
    def create_ansible_inventory(self, write: bool = True):
        """
        Create Ansible inventory, return it as text
        Write to file if write=True
        """
        debug = self.params["debug"]
        deployment_name = self.params["deployment_name"]

        # start from a copy of all deployment params
        ansible_vars = self.params.copy()

        # add config
        ansible_vars["config"] = self.config

        # get missing values from terraform
        for k in [
            "isaac_ip",
            "ovami_ip",
            "cloud",
        ]:
            if k not in self.params or ansible_vars[k] is None:
                ansible_vars[k] = self.tf_output(k)

        # convert booleans to ansible format
        ansible_booleans = {True: "true", False: "false"}
        for k, v in ansible_vars.items():
            if isinstance(v, bool):
                ansible_vars[k] = ansible_booleans[v]

        # render the inventory template with the collected variables
        template = Path(f"{self.config['ansible_dir']}/inventory.template").read_text()
        res = template.format(**ansible_vars)

        # write to file
        if write:
            inventory_file = f"{self.config['state_dir']}/{deployment_name}/.inventory"
            Path(inventory_file).parent.mkdir(parents=True, exist_ok=True)  # create dir
            Path(inventory_file).write_text(res)  # write file
            if debug:
                click.echo(
                    colorize_info(
                        f'* Created Ansible inventory file "{inventory_file}"'
                    )
                )

        return res
def initialize_terraform(self, cwd: str):
"""
Initialize Terraform via shell command
cwd: directory where terraform scripts are located
"""
debug = self.params["debug"]
shell_command(
f"terraform init -upgrade -no-color -input=false {' > /dev/null' if not debug else ''}",
verbose=debug,
cwd=cwd,
)
def run_terraform(self, cwd: str):
"""
Apply Terraform via shell command
cwd: directory where terraform scripts are located
"""
debug = self.params["debug"]
deployment_name = self.params["deployment_name"]
shell_command(
"terraform apply -auto-approve "
+ f"-state={self.config['state_dir']}/{deployment_name}/.tfstate "
+ f"-var-file={self.config['state_dir']}/{deployment_name}/.tfvars",
cwd=cwd,
verbose=debug,
)
def export_ssh_key(self):
"""
Export SSH key from Terraform state
"""
debug = self.params["debug"]
deployment_name = self.params["deployment_name"]
shell_command(
f"terraform output -state={self.config['state_dir']}/{deployment_name}/.tfstate -raw ssh_key"
+ f" > {self.config['state_dir']}/{deployment_name}/key.pem && "
+ f"chmod 0600 {self.config['state_dir']}/{deployment_name}/key.pem",
verbose=debug,
)
def run_ansible(self, playbook_name: str, cwd: str):
"""
Run Ansible playbook via shell command
"""
debug = self.params["debug"]
deployment_name = self.params["deployment_name"]
shell_command(
f"ansible-playbook -i {self.config['state_dir']}/{deployment_name}/.inventory "
+ f"{playbook_name}.yml {'-vv' if self.params['debug'] else ''}",
cwd=cwd,
verbose=debug,
)
def run_all_ansible(self):
# run ansible for isaac
if "isaac" in self.params and self.params["isaac"]:
click.echo(colorize_info("* Running Ansible for Isaac Sim..."))
self.run_ansible(playbook_name="isaac", cwd=f"{self.config['ansible_dir']}")
# run ansible for ovami
# todo: move to ./deploy-aws
if "ovami" in self.params and self.params["ovami"]:
click.echo(colorize_info("* Running Ansible for OV AMI..."))
self.run_ansible(playbook_name="ovami", cwd=f"{self.config['ansible_dir']}")
    def tf_output(self, key: str, default: str = ""):
        """
        Read Terraform output.
        Cache read values in self.tf_outputs.
        Returns `default` when the output cannot be read.
        """
        if key not in self.tf_outputs:
            debug = self.params["debug"]
            deployment_name = self.params["deployment_name"]

            r = shell_command(
                f"terraform output -state='{self.config['state_dir']}/{deployment_name}/.tfstate' -raw '{key}'",
                capture_output=True,
                exit_on_error=False,
                verbose=debug,
            )

            if r.returncode == 0:
                self.tf_outputs[key] = r.stdout.decode()
            else:
                # a missing output is not fatal — warn (in debug) and
                # fall back to `default`
                if self.params["debug"]:
                    click.echo(
                        colorize_error(
                            f"* Warning: Terraform output '{key}' cannot be read."
                        ),
                        err=True,
                    )
                self.tf_outputs[key] = default

            # update meta file to reflect tf outputs
            self.save_meta()

        return self.tf_outputs[key]
def upload_user_data(self):
shell_command(
f'./upload "{self.params["deployment_name"]}" '
+ f'{"--debug" if self.params["debug"] else ""}',
cwd=self.config["app_dir"],
verbose=self.params["debug"],
exit_on_error=True,
capture_output=False,
)
# generate ssh connection command for the user
def ssh_connection_command(self, ip: str):
r = f"ssh -i state/{self.params['deployment_name']}/key.pem "
r += f"-o StrictHostKeyChecking=no ubuntu@{ip}"
if self.params["ssh_port"] != 22:
r += f" -p {self.params['ssh_port']}"
return r
def output_deployment_info(self, extra_text: str = "", print_text=True):
    """
    Print connection info for the user
    Save info to file (_state_dir_/_deployment_name_/info.txt)

    Args:
        extra_text: Optional text appended after the generated instructions.
        print_text: If True, also echo the instructions to the console.

    Returns:
        The generated instructions string.
    """
    # which applications were deployed (keys may be absent, hence the `in` checks)
    isaac = "isaac" in self.params and self.params["isaac"]
    ovami = "ovami" in self.params and self.params["ovami"]
    vnc_password = self.params["vnc_password"]
    deployment_name = self.params["deployment_name"]
    # templates: "__app__" and "__ip__" placeholders are substituted per application below
    nomachine_instruction = f"""* To connect to __app__ via NoMachine:
0. Download NoMachine client at https://downloads.nomachine.com/, install and launch it.
1. Click "Add" button.
2. Enter Host: "__ip__".
3. In "Configuration" > "Use key-based authentication with a key you provide",
select file "state/{deployment_name}/key.pem".
4. Click "Connect" button.
5. Enter "ubuntu" as a username when prompted.
"""
    vnc_instruction = f"""* To connect to __app__ via VNC:
- IP: __ip__
- Port: 5900
- Password: {vnc_password}"""
    nonvc_instruction = f"""* To connect to __app__ via noVNC:
1. Open http://__ip__:6080/vnc.html?host=__ip__&port=6080 in your browser.
2. Click "Connect" and use password \"{vnc_password}\""""
    # print connection info
    instructions_file = f"{self.config['state_dir']}/{deployment_name}/info.txt"
    instructions = ""
    if isaac:
        # banner width tracks the IP length so the asterisk frame lines up
        instructions += f"""{'*' * (29+len(self.tf_output('isaac_ip')))}
* Isaac Sim is deployed at {self.tf_output('isaac_ip')} *
{'*' * (29+len(self.tf_output('isaac_ip')))}
* To connect to Isaac Sim via SSH:
{self.ssh_connection_command(self.tf_output('isaac_ip'))}
{nonvc_instruction}
{nomachine_instruction}""".replace(
            "__app__", "Isaac Sim"
        ).replace(
            "__ip__", self.tf_output("isaac_ip")
        )
    # todo: move to ./deploy-aws
    if ovami:
        instructions += f"""\n
* OV AMI is deployed at {self.tf_output('ovami_ip')}
* To connect to OV AMI via SSH:
{self.ssh_connection_command(self.tf_output('ovami_ip'))}
* To connect to OV AMI via NICE DCV:
- IP: __ip__
{vnc_instruction}
{nomachine_instruction}
""".replace(
            "__app__", "OV AMI"
        ).replace(
            "__ip__", self.tf_output("ovami_ip")
        )
    # extra text
    if len(extra_text) > 0:
        instructions += extra_text + "\n"
    # print instructions for the user
    if print_text:
        click.echo(colorize_result("\n" + instructions))
    # create <dn>/ directory if it doesn't exist
    Path(instructions_file).parent.mkdir(parents=True, exist_ok=True)
    # write file
    Path(instructions_file).write_text(instructions)
    return instructions
| 19,262 | Python | 31.760204 | 116 | 0.53224 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/utils.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
"""
CLI Utils
"""
import json
import os
import subprocess
from glob import glob
from pathlib import Path
import click
from src.python.config import c as config
def colorize_prompt(text):
    """Return `text` styled for interactive prompts (bright cyan, italic)."""
    return click.style(text, fg="bright_cyan", italic=True)
def colorize_error(text):
    """Return `text` styled for error messages (bright red, italic)."""
    return click.style(text, fg="bright_red", italic=True)
def colorize_info(text):
    """Return `text` styled for informational messages (bright magenta, italic)."""
    return click.style(text, fg="bright_magenta", italic=True)
def colorize_result(text):
    """Return `text` styled for success/result output (bright green, italic)."""
    return click.style(text, fg="bright_green", italic=True)
def shell_command(
    command, verbose=False, cwd=None, exit_on_error=True, capture_output=False
):
    """
    Execute a shell command and return the CompletedProcess.

    If `verbose`, echo the command before running it. On failure with
    `exit_on_error`, print captured stderr (when available) and exit(1).
    """
    if verbose:
        shown = f"(cd {cwd} && {command})" if cwd is not None else command
        click.echo(colorize_info(f"* Running `{shown}`..."))
    result = subprocess.run(
        command,
        shell=True,
        cwd=cwd,
        capture_output=capture_output,
    )
    if result.returncode != 0:
        if exit_on_error:
            # stderr is None unless capture_output was requested
            if result.stderr is not None:
                click.echo(
                    colorize_error(f"Error: {result.stderr.decode()}"),
                    err=True,
                )
            exit(1)
    elif verbose and result.stdout is not None:
        click.echo(result.stdout.decode())
    return result
def deployments():
    """List existing deployments by name (subdirectories of the state dir), sorted."""
    state_dir = config["state_dir"]
    names = [
        os.path.basename(os.path.dirname(entry))
        for entry in glob(os.path.join(state_dir, "*/"))
    ]
    return sorted(names)
def read_meta(deployment_name: str, verbose: bool = False):
    """
    Read metadata from json file

    Args:
        deployment_name: Name of the deployment whose meta.json to read.
        verbose: If True, print where the meta info was loaded from.

    Returns:
        Parsed contents of the deployment's meta.json.

    Raises:
        FileNotFoundError: If the meta file does not exist. (Subclass of
            Exception, so existing `except Exception` callers keep working.)
    """
    meta_file = f"{config['state_dir']}/{deployment_name}/meta.json"
    if os.path.isfile(meta_file):
        data = json.loads(Path(meta_file).read_text())
        if verbose:
            click.echo(colorize_info(f"* Meta info loaded from '{meta_file}'"))
        return data
    # raise the specific built-in exception instead of a bare Exception
    raise FileNotFoundError(f"Meta file '{meta_file}' not found")
def read_tf_output(deployment_name, output, verbose=False):
    """Read a single terraform output value from the deployment's tfstate file."""
    state_file = f"{config['state_dir']}/{deployment_name}/.tfstate"
    result = shell_command(
        f"terraform output -state={state_file} -raw {output}",
        capture_output=True,
        exit_on_error=False,
        verbose=verbose,
    )
    return result.stdout.decode().strip()
def format_app_name(app_name):
    """Return the display name for a known app id; unknown ids pass through unchanged."""
    display_names = {
        "isaac": "Isaac Sim",
        "ovami": "OV AMI",
    }
    return display_names.get(app_name, app_name)
def format_cloud_name(cloud_name):
    """Return the display name for a known cloud id; unknown ids pass through unchanged."""
    display_names = {
        "aws": "AWS",
        "azure": "Azure",
        "gcp": "GCP",
        "alicloud": "Alibaba Cloud",
    }
    return display_names.get(cloud_name, cloud_name)
def gcp_login(verbose=False):
    """
    Log into GCP.

    Checks whether application-default credentials are still valid and,
    only if they are not, starts an interactive login.
    """
    # detect if we need to re-login by trying to mint an access token
    click.echo(colorize_info("* Checking GCP login status..."), nl=False)
    check = shell_command(
        "gcloud auth application-default print-access-token 2>&1 > /dev/null",
        verbose=verbose,
        exit_on_error=False,
        capture_output=True,
    )
    if check.returncode == 0:
        click.echo(colorize_info(" logged in!"))
    else:
        click.echo(colorize_info(" not logged in"))
        shell_command(
            "gcloud auth application-default login --no-launch-browser --disable-quota-project --verbosity none",
            verbose=verbose,
        )
| 4,426 | Python | 22.42328 | 113 | 0.608676 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/azure.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
"""
Utils for Azure
"""
import click
from src.python.utils import colorize_info, read_meta, shell_command
def azure_login(verbose=False):
    """
    Log into Azure.

    Runs `az login` (device-code flow) only when the current account
    state is not reported as "Enabled".
    """
    status = shell_command(
        "az account show --query state",
        verbose=verbose,
        exit_on_error=False,
        capture_output=True,
    )
    # `az ... --query state` prints the state as a quoted JSON string
    account_state = status.stdout.decode().strip()
    if account_state != '"Enabled"':
        click.echo(colorize_info("* Logging into Azure..."))
        shell_command("az login --use-device-code", verbose=verbose)
def azure_stop_instance(vm_id, verbose=False):
    """Deallocate the Azure VM identified by `vm_id` (exits on failure)."""
    command = f"az vm deallocate --ids {vm_id}"
    shell_command(
        command,
        verbose=verbose,
        exit_on_error=True,
        capture_output=False,
    )
def azure_start_instance(vm_id, verbose=False):
    """Start the Azure VM identified by `vm_id` (exits on failure)."""
    command = f"az vm start --ids {vm_id}"
    shell_command(
        command,
        verbose=verbose,
        exit_on_error=True,
        capture_output=False,
    )
| 1,639 | Python | 24.230769 | 74 | 0.640635 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/deploy_command.py | # region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
"""
Base deploy- command
"""
import os
import re
import click
import randomname
from pwgen import pwgen
from src.python.config import c as config
from src.python.debug import debug_break # noqa
from src.python.utils import colorize_error, colorize_prompt
class DeployCommand(click.core.Command):
    """
    Defines common cli options for "deploy-*" commands.

    Cloud-specific deploy commands subclass/instantiate this to get the
    shared option set appended to their own options.
    """

    @staticmethod
    def isaac_callback(ctx, param, value):
        """
        Called after --isaac option is parsed
        """
        # disable isaac instance type selection if isaac is disabled
        if value is False:
            for p in ctx.command.params:
                if p.name.startswith("isaac"):
                    p.prompt = None
        return value

    @staticmethod
    def deployment_name_callback(ctx, param, value):
        """Validate the --deployment-name value."""
        # validate: lower-case alnum plus '-', 1..32 chars
        if not re.match("^[a-z0-9\\-]{1,32}$", value):
            raise click.BadParameter(
                colorize_error(
                    "Only lower case letters, numbers and '-' are allowed."
                    + f" Length should be between 1 and 32 characters ({len(value)} provided)."
                )
            )
        return value

    @staticmethod
    def ngc_api_key_callback(ctx, param, value):
        """
        Validate NGC API key
        """
        # fix click bug
        if value is None:
            return value
        # allow "none" as a special value
        if "none" == value:
            return value
        # check if it contains what's allowed
        if not re.match("^[A-Za-z0-9]{32,}$", value):
            raise click.BadParameter(
                colorize_error("Key contains invalid characters or too short.")
            )
        return value

    @staticmethod
    def ngc_image_callback(ctx, param, value):
        """
        Called after the --isaac-image option is parsed
        """
        # ignore case
        value = value.lower()
        # must look like nvcr.io/<org>/[<team>/]<image>:<tag>
        if not re.match(
            "^nvcr\\.io/[a-z0-9\\-_]+/([a-z0-9\\-_]+/)?[a-z0-9\\-_]+:[a-z0-9\\-_.]+$",
            value,
        ):
            raise click.BadParameter(
                colorize_error(
                    "Invalid image name. "
                    + "Expected: nvcr.io/<org>/[<team>/]<image>:<tag>"
                )
            )
        return value

    @staticmethod
    def oige_callback(ctx, param, value):
        """
        Called after parsing --oige option
        """
        # empty value means "use the configured default git checkpoint"
        if "" == value:
            return config["default_oige_git_checkpoint"]
        return value

    @staticmethod
    def orbit_callback(ctx, param, value):
        """
        Called after parsing --orbit option
        """
        # empty value means "use the configured default git checkpoint"
        if "" == value:
            return config["default_orbit_git_checkpoint"]
        return value

    def param_index(self, param_name):
        """
        Return index of parameter with given name.
        Useful for inserting new parameters at a specific position.
        """
        return list(
            filter(
                lambda param: param[1].name == param_name,
                enumerate(self.params),
            )
        )[0][0]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # add common options (each insert appends at the end of the list)
        # --debug/--no-debug
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--debug/--no-debug",),
                default=False,
                show_default=True,
                help="Enable debug output.",
            ),
        )
        # --prefix
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--prefix",),
                default=config["default_prefix"],
                show_default=True,
                help="Prefix for all cloud resources.",
            ),
        )
        # --from-image/--not-from-image
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--from-image/--not-from-image",),
                default=False,
                show_default=True,
                help="Deploy from pre-built image, from bare OS otherwise.",
            ),
        )
        # --in-china
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--in-china",),
                type=click.Choice(["auto", "yes", "no"]),
                prompt=False,
                default="auto",
                show_default=True,
                help="Is deployment in China? (Local mirrors will be used.)",
            ),
        )
        # --deployment-name / --dn
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--deployment-name", "--dn"),
                prompt=colorize_prompt(
                    '* Deployment Name (lower case letters, numbers and "-")'
                ),
                default=randomname.get_name,
                callback=DeployCommand.deployment_name_callback,
                show_default="<randomly generated>",
                help="Name of the deployment. Used to identify the created cloud resources and files.",
            ),
        )
        # --existing: behavior when a deployment with this name already exists
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--existing",),
                type=click.Choice(
                    ["ask", "repair", "modify", "replace", "run_ansible"]
                ),
                default="ask",
                show_default=True,
                help="""What to do if deployment already exists:
\n* 'repair' will try to fix broken deployment without applying new user parameters.
\n* 'modify' will update user selected parameters and attempt to update existing cloud resources.
\n* 'replace' will attempt to delete old deployment's cloud resources first.
\n* 'run_ansible' will re-run Ansible playbooks.""",
            ),
        )
        # --isaac/--no-isaac
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--isaac/--no-isaac",),
                default=True,
                show_default="yes",
                prompt=colorize_prompt("* Deploy Isaac Sim?"),
                callback=DeployCommand.isaac_callback,
                help="Deploy Isaac Sim (BETA)?",
            ),
        )
        # --isaac-image
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--isaac-image",),
                default=config["default_isaac_image"],
                prompt=colorize_prompt("* Isaac Sim docker image"),
                show_default=True,
                callback=DeployCommand.ngc_image_callback,
                help="Isaac Sim docker image to use.",
            ),
        )
        # --oige
        help = (
            "Install Omni Isaac Gym Envs? Valid values: 'no', "
            + "or <git ref in github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs>"
        )
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--oige",),
                help=help,
                default="main",
                show_default=True,
                prompt=colorize_prompt("* " + help),
                callback=DeployCommand.oige_callback,
            ),
        )
        # --orbit
        help = (
            "[EXPERIMENTAL] Install Isaac Sim Orbit? Valid values: 'no', "
            + "or <git ref in github.com/NVIDIA-Omniverse/orbit>"
        )
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--orbit",),
                help=help,
                default="no",
                show_default=True,
                prompt=colorize_prompt("* " + help),
                callback=DeployCommand.orbit_callback,
            ),
        )
        # --ngc-api-key (defaults from the NGC_API_KEY environment variable)
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--ngc-api-key",),
                type=str,
                prompt=colorize_prompt(
                    "* NGC API Key (can be obtained at https://ngc.nvidia.com/setup/api-key)"
                ),
                default=os.environ.get("NGC_API_KEY", ""),
                show_default='"NGC_API_KEY" environment variable',
                help="NGC API Key (can be obtained at https://ngc.nvidia.com/setup/api-key)",
                callback=DeployCommand.ngc_api_key_callback,
            ),
        )
        # --ngc-api-key-check/--no-ngc-api-key-check
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--ngc-api-key-check/--no-ngc-api-key-check",),
                default=True,
                help="Skip NGC API key validity check.",
            ),
        )
        # --vnc-password
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--vnc-password",),
                default=lambda: pwgen(10),
                help="Password for VNC access to DRIVE Sim/Isaac Sim/etc.",
                show_default="<randomly generated>",
            ),
        )
        # --system-user-password
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--system-user-password",),
                default=lambda: pwgen(10),
                help="System user password",
                show_default="<randomly generated>",
            ),
        )
        # --ssh-port
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--ssh-port",),
                default=config["default_ssh_port"],
                help="SSH port for connecting to the deployed machines.",
                show_default=True,
            ),
        )
        # --upload/--no-upload
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--upload/--no-upload",),
                prompt=False,
                default=True,
                show_default=True,
                help=f"Upload user data from \"{config['uploads_dir']}\" to cloud "
                + f"instances (to \"{config['default_remote_uploads_dir']}\")?",
            ),
        )
        # shared default so --omniverse-password gets one generated value
        default_nucleus_admin_password = pwgen(10)
        # --omniverse-user
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--omniverse-user",),
                default=config["default_omniverse_user"],
                help="Username for accessing content on the Nucleus server.",
                show_default=True,
            ),
        )
        # --omniverse-password
        self.params.insert(
            len(self.params),
            click.core.Option(
                ("--omniverse-password",),
                default=default_nucleus_admin_password,
                help="Password for accessing content on the Nucleus server.",
                show_default="<randomly generated>",
            ),
        )
| 11,460 | Python | 29.975676 | 113 | 0.494503 |
NVIDIA-Omniverse/IsaacSim-Automator/src/python/ngc.test.py | #!/usr/bin/env python3
# region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
import os
import unittest
from src.python.ngc import check_ngc_access
class Test_NGC_Key_Validation(unittest.TestCase):
    """Tests for check_ngc_access() key validation."""

    # deliberately malformed key; check_ngc_access is expected to reject it
    INVALID_KEY = "__invalid__"
    # a real key comes from the environment so it is never committed to the repo
    VALID_KEY = os.environ.get("NGC_API_KEY", "__none__")

    def test_invalid_key(self):
        """Test invalid key"""
        r = check_ngc_access(self.INVALID_KEY)
        # expected return code for an invalid key is 100
        self.assertEqual(r, 100)

    def test_valid_key(self):
        """Test valid key (should be set in NGC_API_KEY env var)"""
        # skip rather than fail when no real key is available
        if "__none__" == self.VALID_KEY:
            self.skipTest("No NGC_API_KEY env var set")
            return
        r = check_ngc_access(self.VALID_KEY)
        # 0 indicates the key was accepted
        self.assertEqual(r, 0)
if __name__ == "__main__":
unittest.main()
| 1,337 | Python | 27.468085 | 74 | 0.672401 |
NVIDIA-Omniverse/IsaacSim-Automator/src/tests/deployer.test.py | #!/usr/bin/env python3
import unittest
from src.python.config import c
from src.python.deployer import Deployer
from pathlib import Path
class Test_Deployer(unittest.TestCase):
    """Tests Deployer.output_deployment_info against a pre-recorded state directory."""

    def setUp(self):
        # point the app at the bundled test state dir so no real deployment is touched
        self.config = c
        self.config["state_dir"] = f"{c['tests_dir']}/res/state"
        # fixed, fake parameter set matching the "test-1" fixture deployment
        self.deployer = Deployer(
            params={
                "debug": False,
                "prefix": "isa",
                "from_image": False,
                "deployment_name": "test-1",
                "existing": "ask",
                "region": "us-east-1",
                "isaac": True,
                "isaac_instance_type": "g5.2xlarge",
                "isaac_image": "nvcr.io/nvidia/isaac-sim:2022.2.0",
                "ngc_api_key": "__ngc_api_key__",
                "ngc_api_key_check": True,
                "vnc_password": "__vnc_password__",
                "omniverse_user": "ovuser",
                "omniverse_password": "__omniverse_password__",
                "ssh_port": 22,
                "upload": True,
                "aws_access_key_id": "__aws_access_key_id__",
                "aws_secret_access_key": "__aws_secret_access_key__",
            },
            config=self.config,
        )

    def tearDown(self):
        # drop the deployer so each test builds a fresh one
        self.deployer = None

    def test_output_deployment_info(self):
        """Generated info.txt must match the expected fixture exactly."""
        self.deployer.output_deployment_info(print_text=False)
        file_generated = f"{self.config['state_dir']}/test-1/info.txt"
        file_expected = f"{self.config['state_dir']}/test-1/info.expected.txt"
        file_generated = Path(file_generated).read_text()
        file_expected = Path(file_expected).read_text()
        self.assertEqual(file_generated, file_expected)
if __name__ == "__main__":
unittest.main()
| 1,760 | Python | 29.894736 | 78 | 0.526136 |
NVIDIA-Omniverse/synthetic-data-examples/README.md | # Synthetic Data Examples
This public repository is for examples of the generation and/or use of synthetic data, primarily using tools like [NVIDIA Omniverse](https://www.nvidia.com/en-us/omniverse/), [Omniverse Replicator](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator.html), [NVIDIA Tao](https://developer.nvidia.com/tao-toolkit), and [NVIDIA NGC](https://www.nvidia.com/en-us/gpu-cloud/).
## Synthetic Data Blogs & Repositories
* [Blog - How to Train Autonomous Mobile Robots to Detect Warehouse Pallet Jacks](https://developer.nvidia.com/blog/how-to-train-autonomous-mobile-robots-to-detect-warehouse-pallet-jacks-using-synthetic-data/)
[GitHub](https://github.com/NVIDIA-AI-IOT/synthetic_data_generation_training_workflow)
* [Blog - Developing a Pallet Detection Model Using OpenUSD and Synthetic Data](https://developer.nvidia.com/blog/developing-a-pallet-detection-model-using-openusd-and-synthetic-data/)
[GitHub](https://github.com/NVIDIA-AI-IOT/sdg_pallet_model)
| 1,020 | Markdown | 67.066662 | 400 | 0.781373 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/README.md | # Omniverse Replicator Examples
Code here requires the installation of [NVIDIA Omniverse](https://www.nvidia.com/en-us/omniverse/) and [Omniverse Replicator](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator.html).
| 246 | Markdown | 60.749985 | 212 | 0.808943 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_trigger_intervals.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep
with rep.new_layer():
    # Camera looking at the origin
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    spheres = rep.create.sphere(
        semantics=[("class", "sphere")], position=(0, 0, 100), count=6
    )
    # Modify the position every 5 frames (trigger fires 10 times, every `interval` frames)
    with rep.trigger.on_frame(num_frames=10, interval=5):
        with spheres:
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )
    # Modify color every frame for 50 frames
    with rep.trigger.on_frame(num_frames=50):
        with spheres:
            rep.randomizer.color(
                colors=rep.distribution.normal((0.1, 0.1, 0.1), (1.0, 1.0, 1.0))
            )
    # Render 512x512 RGB frames to ./trigger_intervals via the basic writer
    render_product = rep.create.render_product(camera, (512, 512))
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="trigger_intervals",
        rgb=True,
    )
    writer.attach([render_product])
| 2,854 | Python | 41.61194 | 84 | 0.709881 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_light_modification.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This snippet shows how to modify attributes on prims that Replicator
may not have a direct functional mapping for.
"""
import omni.replicator.core as rep
with rep.new_layer():
    # Camera looking at the origin
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    cubes = rep.create.cube(
        semantics=[("class", "cube")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )
    spheres = rep.create.sphere(
        semantics=[("class", "sphere")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )
    # Three sphere lights with randomized initial intensity/position/scale
    lights = rep.create.light(
        light_type="Sphere",
        intensity=rep.distribution.normal(500, 35000),
        position=rep.distribution.uniform((-300, 300, -300), (300, 1000, 300)),
        scale=rep.distribution.uniform(50, 100),
        count=3,
    )
    # Re-randomize the lights over 10 frames; intensity/color are set via
    # rep.modify.attribute since there is no direct functional mapping for them
    with rep.trigger.on_frame(num_frames=10):
        with lights:
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 300, -300), (300, 1000, 300))
            )
            rep.modify.attribute("intensity", rep.distribution.uniform(1.0, 50000.0))
            rep.modify.attribute(
                "color", rep.distribution.normal((0.2, 0.2, 0.2), (1.0, 1.0, 1.0))
            )
| 3,125 | Python | 43.028168 | 86 | 0.69056 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_multiple_semantic_classes.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep
with rep.new_layer():
    # One prim per semantic label type ("class", "class2", "class3") so each
    # appears under its own semantic type in the segmentation output
    sphere = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 100, 100))
    cube = rep.create.cube(semantics=[("class2", "cube")], position=(200, 200, 100))
    plane = rep.create.plane(semantics=[("class3", "plane")], scale=10)

    def get_shapes():
        # Randomize the pose of every prim tagged class=cube or class=sphere
        shapes = rep.get.prims(semantics=[("class", "cube"), ("class", "sphere")])
        with shapes:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 50, -500), (500, 50, 500)),
                rotation=rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
                scale=rep.distribution.normal(1, 0.5),
            )
        return shapes.node

    with rep.trigger.on_frame(num_frames=2):
        rep.randomizer.register(get_shapes)

    # Setup Camera
    camera = rep.create.camera(position=(500, 500, 500), look_at=(0, 0, 0))
    render_product = rep.create.render_product(camera, (512, 512))
    # Write RGB plus colorized semantic segmentation for all three label types
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="semantics_classes",
        rgb=True,
        semantic_segmentation=True,
        colorize_semantic_segmentation=True,
        semantic_types=["class", "class2", "class3"],
    )
    writer.attach([render_product])
    rep.orchestrator.run()
| 2,956 | Python | 41.855072 | 87 | 0.715832 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_scatter_multi_trigger.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This snippet shows how to setup multiple independent triggers that happen
at different intervals in the simulation.
"""
import omni.graph.core as og
import omni.replicator.core as rep
# A light to see
distance_light = rep.create.light(rotation=(-45, 0, 0), light_type="distant")
# Create a plane to sample on
plane_samp = rep.create.plane(scale=4, rotation=(20, 0, 0))
# Create a larger sphere to sample on the surface of
sphere_samp = rep.create.sphere(scale=2.4, position=(0, 100, -180))
# Create a larger cylinder we do not want to collide with
cylinder = rep.create.cylinder(semantics=[("class", "cylinder")], scale=(2, 1, 2))
def randomize_spheres():
    """Scatter 60 small spheres over the sample surfaces and recolor them.

    The spheres land on ``plane_samp`` / ``sphere_samp``, avoid colliding
    with ``cylinder``, and each receives a uniformly random dark-gray to
    white tint. Returns the batch's OmniGraph node so this function can be
    registered as a Replicator randomizer.
    """
    small_spheres = rep.create.sphere(scale=0.4, count=60)
    with small_spheres:
        rep.randomizer.scatter_2d(
            surface_prims=[plane_samp, sphere_samp],
            no_coll_prims=[cylinder],
            check_for_collisions=True,
        )
        rep.randomizer.color(
            colors=rep.distribution.uniform((0.2, 0.2, 0.2), (1, 1, 1))
        )
    return small_spheres.node
rep.randomizer.register(randomize_spheres)
# Trigger 1: executes 5 times total, every other frame (interval=2)
with rep.trigger.on_frame(num_frames=5, interval=2):
    rep.randomizer.randomize_spheres()
# Trigger 2: executes 10 times total, once every frame
with rep.trigger.on_frame(num_frames=10):
    with cylinder:
        # Alternate the cylinder's visibility on/off each frame
        rep.modify.visibility(rep.distribution.sequence([True, False]))
og.Controller.evaluate_sync()  # Only for snippet demonstration preview, not needed for production
rep.orchestrator.preview()  # Only for snippet demonstration preview, not needed for production
# Render from the default Kit perspective camera at 1024x768
rp = rep.create.render_product("/OmniverseKit_Persp", (1024, 768))
# Initialize and attach a BasicWriter that saves RGB frames to ./scatter_example
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir="scatter_example", rgb=True)
writer.attach([rp])
rep.orchestrator.run()
| 3,612 | Python | 40.056818 | 98 | 0.745847 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_writer_segmentation_colors.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A snippet showing to how create a custom writer to output specific colors
in the semantic annotator output image.
"""
import omni.replicator.core as rep
from omni.replicator.core import Writer, BackendDispatch, WriterRegistry
class MyWriter(Writer):
    """Writer that saves the RGB frame plus a semantic-segmentation image
    colorized with a fixed label -> RGBA palette (``CUSTOM_LABELS``).

    Files are written under ``output_dir`` as ``rgb_<N>.png`` and
    ``seg_<N>.png``, where ``N`` is a 0-based frame counter.
    """

    def __init__(self, output_dir: str):
        # Frame counter used to name output files.
        self._frame_id = 0
        self.backend = BackendDispatch({"paths": {"out_dir": output_dir}})
        # Annotators requested from the render product(s) this writer attaches to.
        self.annotators = ["rgb", "semantic_segmentation"]
        # Dictionary mapping of label to RGBA color
        self.CUSTOM_LABELS = {
            "unlabelled": (0, 0, 0, 0),
            "sphere": (128, 64, 128, 255),
            "cube": (244, 35, 232, 255),
            "plane": (102, 102, 156, 255),
        }

    def write(self, data):
        """Write one frame of annotator data, then advance the frame counter."""
        # NOTE: the original computed a `render_products` list here that was
        # never used; it has been removed as dead code.
        self._write_rgb(data, "rgb")
        self._write_segmentation(data, "semantic_segmentation")
        self._frame_id += 1

    def _write_rgb(self, data, annotator: str):
        # Save the raw RGB annotator output as a PNG.
        rgb_file_path = f"rgb_{self._frame_id}.png"
        self.backend.write_image(rgb_file_path, data[annotator])

    def _write_segmentation(self, data, annotator: str):
        # Colorize the raw id image with this writer's fixed palette, then save.
        seg_filepath = f"seg_{self._frame_id}.png"
        semantic_seg_data_colorized = rep.tools.colorize_segmentation(
            data[annotator]["data"],
            data[annotator]["info"]["idToLabels"],
            mapping=self.CUSTOM_LABELS,
        )
        self.backend.write_image(seg_filepath, semantic_seg_data_colorized)

    def on_final_frame(self):
        # Flush any writes still pending in the backend before shutdown.
        self.backend.sync_pending_paths()
# Register the custom writer so WriterRegistry.get("MyWriter") can find it
WriterRegistry.register(MyWriter)
# Create a new layer for our work to be performed in.
# This is a good habit to develop for later when working on existing Usd scenes
with rep.new_layer():
    light = rep.create.light(light_type="dome")
    # Create a simple camera with a position and a point to look at
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    # Create some simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    torus = rep.create.torus(position=(200, 0, 100)) # Torus will be unlabeled
    sphere = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 0, 100))
    cube = rep.create.cube(semantics=[("class", "cube")], position=(-200, 0, 100))
    # Randomize position and scale of each object on each frame
    with rep.trigger.on_frame(num_frames=10):
        # Creating a group so that our modify.pose operation works on all the shapes at once
        with rep.create.group([torus, sphere, cube]):
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )
    # Initialize render product and attach a writer
    render_product = rep.create.render_product(camera, (1024, 1024))
    # Use the custom writer registered above
    writer = rep.WriterRegistry.get("MyWriter")
    writer.initialize(output_dir="myWriter_output")
    writer.attach([render_product])
    rep.orchestrator.run()  # Run the simulation
| 4,866 | Python | 43.245454 | 92 | 0.690506 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_remove_semantics.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Snippet: build a labeled scene, query prims by semantic label, then strip
# all semantic data from the matched prims with remove_prim_semantics.
import omni.graph.core as og
import omni.replicator.core as rep
from omni.usd._impl.utils import get_prim_at_path
from pxr import Semantics
from semantics.schema.editor import remove_prim_semantics
# Setup simple scene
with rep.new_layer():
    # Simple scene setup
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    cubes = rep.create.cube(
        semantics=[("class", "cube")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )
    spheres = rep.create.sphere(
        semantics=[("class", "sphere")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )
# Get prims to remove semantics on - Execute this first by itself
my_spheres = rep.get.prims(semantics=[("class", "sphere")])
og.Controller.evaluate_sync()  # Trigger an OmniGraph evaluation of the graph to set the values
get_targets = rep.utils.get_node_targets(my_spheres.node, "outputs_prims")
print(get_targets)
# Expected output is the list of matched sphere prim paths, e.g.:
# [Sdf.Path('/Replicator/Sphere_Xform'), Sdf.Path('/Replicator/Sphere_Xform_01'), Sdf.Path('/Replicator/Sphere_Xform_02'), Sdf.Path('/Replicator/Sphere_Xform_03'), Sdf.Path('/Replicator/Sphere_Xform_04'), Sdf.Path('/Replicator/Sphere_Xform_05')]
# Loop through each prim_path and remove all semantic data
for prim_path in get_targets:
    prim = get_prim_at_path(prim_path)
    # print(prim.HasAPI(Semantics.SemanticsAPI))
    result = remove_prim_semantics(prim)  # To remove all semantics
    # result = remove_prim_semantics(prim, label_type='class') # To remove only 'class' semantics
    print(result)
| 3,457 | Python | 45.729729 | 245 | 0.731559 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replcator_clear_layer.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.usd

# Wipe the contents of every layer named "test" in the current stage's
# layer stack. Clear() removes the layer's opinions; the (now empty)
# layer itself remains in the stack.
current_stage = omni.usd.get_context().get_stage()
for sub_layer in current_stage.GetLayerStack():
    if sub_layer.GetDisplayName() != "test":
        continue
    sub_layer.Clear()
| 1,868 | Python | 46.923076 | 84 | 0.770343 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_time_intervals.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep
# Snippet: two independent time-based triggers — one randomizing pose on a
# slow interval, one randomizing color on the default interval.
with rep.new_layer():
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    spheres = rep.create.sphere(
        semantics=[("class", "sphere")], position=(0, 0, 100), count=6
    )
    # Trigger 1: fires 10 times, once every 5 time units — scatter/scale the spheres
    with rep.trigger.on_time(num=10, interval=5):
        with spheres:
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )
    # Trigger 2: fires 50 times at the default interval — recolor the spheres
    with rep.trigger.on_time(num=50):
        with spheres:
            rep.randomizer.color(
                colors=rep.distribution.normal((0.1, 0.1, 0.1), (1.0, 1.0, 1.0))
            )
    # Render at 512x512 and write RGB frames to ./time_intervals
    render_product = rep.create.render_product(camera, (512, 512))
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="time_intervals",
        rgb=True,
    )
    writer.attach([render_product])
| 2,748 | Python | 41.953124 | 84 | 0.708879 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_annotator_segmentation.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This is an example of how to view annotator data if needed.
"""
import asyncio
import omni.replicator.core as rep
import omni.syntheticdata as sd
async def test_semantics():
    """Build a tiny semantically-labeled scene, render one step, and print
    the raw output of the semantic_segmentation annotator.
    """
    cone = rep.create.cone(semantics=[("prim", "cone")], position=(100, 0, 0))
    sphere = rep.create.sphere(semantics=[("prim", "sphere")], position=(-100, 0, 0))
    boxy_cube = rep.create.cube(semantics=[("shape", "boxy")], position=(0, 100, 0))
    # Optional semantic filter — uncomment to keep only "prim:*" labels:
    # sd.SyntheticData.Get().set_instance_mapping_semantic_filter("prim:*")
    view_cam = rep.create.camera(position=(500, 500, 500), look_at=(0, 0, 0))
    render_prod = rep.create.render_product(view_cam, (1024, 512))
    seg_annotator = rep.AnnotatorRegistry.get_annotator("semantic_segmentation")
    seg_annotator.attach(render_prod)
    # step_async() tells Omniverse to update; without it the annotation
    # buffer could be empty.
    await rep.orchestrator.step_async()
    annot_data = seg_annotator.get_data()
    print(annot_data)
    # Example output (abridged): a uint32 label image under "data", plus
    # "info" containing "idToLabels" such as:
    #   {
    #       "0": {"class": "BACKGROUND"},
    #       "2": {"prim": "cone"},
    #       "3": {"prim": "sphere"},
    #       "4": {"shape": "boxy"},
    #   }
# Schedule the async test on the already-running event loop
asyncio.ensure_future(test_semantics())
| 3,332 | Python | 38.211764 | 91 | 0.653962 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_multi_object_visibility_toggle.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This will create a group from a list of objects and
1. Render all the objects together
2. Toggle sole visiblity for each object & render
3. Randomize pose for all objects, repeat
This can be useful for training on object occlusions.
"""
import omni.replicator.core as rep
NUM_POSE_RANDOMIZATIONS = 10


def make_visibility_lists(num_objects):
    """Build the visibility toggle sequence for ``num_objects`` objects.

    Returns a list of boolean tuples: the first tuple makes every object
    visible, then one tuple per object with only that object visible.
    For 3 objects:
        [(True, True, True),
         (True, False, False),
         (False, True, False),
         (False, False, True)]

    The original nested-loop construction is replaced with equivalent
    comprehensions for clarity.
    """
    # All-visible first pass
    visibility_states = [tuple(True for _ in range(num_objects))]
    # Then one state per object with only that object visible
    visibility_states.extend(
        tuple(i == visible_idx for i in range(num_objects))
        for visible_idx in range(num_objects)
    )
    return visibility_states
with rep.new_layer():
    # Setup camera and simple light
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    light = rep.create.light(rotation=(-45, 45, 0))
    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    torus = rep.create.torus(semantics=[("class", "torus")], position=(200, 0, 100))
    sphere = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 0, 100))
    cube = rep.create.cube(semantics=[("class", "cube")], position=(-200, 0, 100))
    # Create a group of the objects we will be manipulating
    # Leaving-out camera, light, and plane from visibility toggling and pose randomization
    object_group = rep.create.group([torus, sphere, cube])
    # Get the number of objects to toggle, can work with any number of objects
    num_objects_to_toggle = len(object_group.get_output_prims()["prims"])
    # Create our lists-of-lists for visibility
    visibility_sequence = make_visibility_lists(num_objects_to_toggle)
    # Trigger to toggle visibility one at a time; total frames = (#objects + 1
    # visibility states) * number of pose randomizations
    with rep.trigger.on_frame(
        max_execs=(num_objects_to_toggle + 1) * NUM_POSE_RANDOMIZATIONS
    ):
        with object_group:
            rep.modify.visibility(rep.distribution.sequence(visibility_sequence))
    # Trigger to randomize position and scale, interval set to number of objects +1(1 extra for the "all visible" frame)
    with rep.trigger.on_frame(
        max_execs=NUM_POSE_RANDOMIZATIONS, interval=num_objects_to_toggle + 1
    ):
        with object_group:
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )
    # Initialize render product and attach writer (RGB + semantic segmentation)
    render_product = rep.create.render_product(camera, (512, 512))
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="toggle_multi_visibility",
        rgb=True,
        semantic_segmentation=True,
    )
    writer.attach([render_product])
    rep.orchestrator.run()
| 4,759 | Python | 40.391304 | 120 | 0.703929 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_filter_semantics.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This snippet shows how to use semantic filtering to remove labels or entire prims
with specific semantic labels from being output to the semantic annotator
https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/semantics_schema_editor.html
"""
import omni.replicator.core as rep
from omni.syntheticdata import SyntheticData
with rep.new_layer():
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    cubes = rep.create.cube(
        semantics=[("class", "cube")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )
    spheres = rep.create.sphere(
        semantics=[("class", "sphere")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )
    # 10 Frames of randomizations, randomizing colors for spheres and cubes independent of each other
    with rep.trigger.on_frame(num_frames=10):
        with cubes:
            rep.randomizer.color(
                colors=rep.distribution.normal((0.2, 0.2, 0.2), (1.0, 1.0, 1.0))
            )
        with spheres:
            rep.randomizer.color(
                colors=rep.distribution.normal((0.2, 0.2, 0.2), (1.0, 1.0, 1.0))
            )
    render_product = rep.create.render_product(camera, (512, 512))
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="filter_semantics",
        rgb=True,
        semantic_segmentation=True,
        colorize_semantic_segmentation=True,
    )
    # Exclude class:sphere objects from the semantic output
    # (filter set _after_ BasicWriter is initialized)
    # https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/semantics_schema_editor.html
    SyntheticData.Get().set_instance_mapping_semantic_filter("class:!sphere")
    writer.attach([render_product])
    rep.orchestrator.run()
| 3,652 | Python | 43.012048 | 111 | 0.720701 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/scattering/scatter2D_multi_surface.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import omni.graph.core as og
import omni.replicator.core as rep
# A distant light so the generated renders are lit
distance_light = rep.create.light(rotation=(-45, 0, 0), light_type="distant")
# A tilted plane used as one of the scatter_2d sample surfaces
plane_samp = rep.create.plane(scale=4, rotation=(20, 0, 0))
# A larger sphere whose surface is also sampled by scatter_2d
sphere_samp = rep.create.sphere(scale=2.4, position=(0, 100, -180))
def randomize_spheres():
    """Scatter 40 small spheres across both sample surfaces and tint them.

    The spheres are distributed over ``plane_samp`` and ``sphere_samp``
    with collision checking, and each gets a uniformly random dark-gray to
    white color. Returns the batch's OmniGraph node so this function can be
    registered as a Replicator randomizer.
    """
    scattered = rep.create.sphere(scale=0.4, count=40)
    with scattered:
        rep.randomizer.scatter_2d(
            surface_prims=[plane_samp, sphere_samp], check_for_collisions=True
        )
        rep.randomizer.color(
            colors=rep.distribution.uniform((0.2, 0.2, 0.2), (1, 1, 1))
        )
    return scattered.node
# Register the randomizer and run it on each of 10 frames
rep.randomizer.register(randomize_spheres)
with rep.trigger.on_frame(num_frames=10):
    rep.randomizer.randomize_spheres()
# Evaluate the graph and open the preview (snippet demonstration only)
og.Controller.evaluate_sync()
rep.orchestrator.preview()
| 2,652 | Python | 39.196969 | 98 | 0.749246 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/scattering/scatter2D_basic.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import omni.graph.core as og
import omni.replicator.core as rep
# A tilted plane used as the scatter_2d sample surface
plane_samp = rep.create.plane(scale=4, rotation=(20, 0, 0))
def randomize_spheres():
    """Create 30 small spheres, scatter them over ``plane_samp`` with
    collision checking, and give each a random dark-gray to white color.

    Returns the batch's OmniGraph node so this function can be registered
    as a Replicator randomizer.
    """
    scattered = rep.create.sphere(scale=0.4, count=30)
    with scattered:
        rep.randomizer.scatter_2d(plane_samp, check_for_collisions=True)
        rep.randomizer.color(
            colors=rep.distribution.uniform((0.2, 0.2, 0.2), (1, 1, 1))
        )
    return scattered.node
# Register the randomizer and run it on each of 10 frames
rep.randomizer.register(randomize_spheres)
with rep.trigger.on_frame(num_frames=10):
    rep.randomizer.randomize_spheres()
# Evaluate the graph and open the preview (snippet demonstration only)
og.Controller.evaluate_sync()
rep.orchestrator.preview()
| 2,371 | Python | 39.896551 | 98 | 0.759595 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/scattering/scatter2D_avoid_objects.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import omni.graph.core as og
import omni.replicator.core as rep

# Distant light so the preview render is not black.
key_light = rep.create.light(rotation=(-45, 0, 0), light_type="distant")

# Two surfaces the small spheres are allowed to land on.
scatter_plane = rep.create.plane(scale=4, rotation=(20, 0, 0))
scatter_sphere = rep.create.sphere(scale=2.4, position=(0, 100, -180))

# Obstacle the scattered spheres must keep clear of.
avoid_cylinder = rep.create.cylinder(semantics=[("class", "cylinder")], scale=(2, 1, 2))


def randomize_spheres():
    """Drop 40 small spheres onto the scatter surfaces while avoiding the cylinder.

    Registered below as a Replicator randomizer; returns the graph node so the
    trigger can re-run it each frame.
    """
    balls = rep.create.sphere(scale=0.4, count=40)
    with balls:
        # Scatter across both surfaces, rejecting placements that would
        # collide with the cylinder or with each other.
        rep.randomizer.scatter_2d(
            surface_prims=[scatter_plane, scatter_sphere],
            no_coll_prims=[avoid_cylinder],
            check_for_collisions=True,
        )
        # Random non-black color per sphere.
        rep.randomizer.color(
            colors=rep.distribution.uniform((0.2, 0.2, 0.2), (1, 1, 1))
        )
    return balls.node


rep.randomizer.register(randomize_spheres)

# Re-scatter on each of 10 frames.
with rep.trigger.on_frame(num_frames=10):
    rep.randomizer.randomize_spheres()

og.Controller.evaluate_sync()
rep.orchestrator.preview()
| 2,845 | Python | 39.084506 | 98 | 0.740246 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/scattering/scattering.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import omni.graph.core as og
import omni.replicator.core as rep

# Tilted plane that the small spheres will be scattered across.
plane_samp = rep.create.plane(scale=4, rotation=(20, 0, 0))


def randomize_spheres():
    """Create 30 small spheres and scatter them over the sample plane.

    Registered below as a Replicator randomizer; returns the graph node so
    the trigger can re-run it on every frame.
    """
    spheres = rep.create.sphere(scale=0.4, count=30)
    with spheres:
        # Scatter over the plane; overlapping spheres are allowed.
        rep.randomizer.scatter_2d(plane_samp, check_for_collisions=False)
        # Random non-black color per sphere.
        rep.randomizer.color(
            colors=rep.distribution.uniform((0.2, 0.2, 0.2), (1, 1, 1))
        )
    return spheres.node


rep.randomizer.register(randomize_spheres)

# Re-scatter on each of 10 frames.
with rep.trigger.on_frame(num_frames=10):
    rep.randomizer.randomize_spheres()

# Fix: the original called rep.orchestrator.preview() twice (once before
# evaluate_sync and once after), starting a redundant preview run. Keep the
# single evaluate-then-preview sequence used by the sibling scatter examples.
og.Controller.evaluate_sync()
rep.orchestrator.preview()
| 2,399 | Python | 39.677965 | 98 | 0.760317 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/scattering/scatter2D_with_limits.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Fix: the script calls og.Controller.evaluate_sync() at the bottom but never
# imported omni.graph.core, which raised a NameError at runtime. Import it
# here alongside the replicator module (as the sibling scatter examples do).
import omni.graph.core as og
import omni.replicator.core as rep

# A distant light so the scene is visible.
distance_light = rep.create.light(rotation=(-45, 0, 0), light_type="distant")

# Tilted plane to scatter spheres onto.
plane_samp = rep.create.plane(scale=4, rotation=(20, 0, 0))
# Larger sphere whose surface is also used as a scatter target.
sphere_samp = rep.create.sphere(scale=2.4, position=(0, 100, -180))


def randomize_spheres():
    """Scatter 60 small spheres on both surfaces, clamped by world-space limits.

    Registered below as a Replicator randomizer; returns the graph node so
    the trigger can re-run it on every frame.
    """
    spheres = rep.create.sphere(scale=0.4, count=60)
    with spheres:
        rep.randomizer.scatter_2d(
            [plane_samp, sphere_samp],
            # No lower clamp on any axis.
            min_samp=(None, None, None),
            # Small spheres will not go beyond 0 in X, 110 in Y, 30 in Z world space
            max_samp=(0, 110, 30),
            check_for_collisions=False,
        )
        # Random non-black color per sphere.
        rep.randomizer.color(
            colors=rep.distribution.uniform((0.2, 0.2, 0.2), (1, 1, 1))
        )
    return spheres.node


rep.randomizer.register(randomize_spheres)

# Re-scatter on each of 10 frames.
with rep.trigger.on_frame(num_frames=10):
    rep.randomizer.randomize_spheres()

og.Controller.evaluate_sync()
rep.orchestrator.preview()
| 2,784 | Python | 39.362318 | 98 | 0.731681 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/surface_scratches/scratches_randomization.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from pathlib import Path
# NOTE(review): `Path` is imported above but never used in this script;
# confirm it is not needed before removing.
import carb
import omni.replicator.core as rep
import omni.usd
from pxr import Sdf, UsdGeom
"""
Instructions:
Open the example scene file "scratches_randomization.usda",
located adjacent to this script, in Omniverse prior to using this script
"""
# Get the current Usd "stage". This is where all the scene objects live
stage = omni.usd.get_context().get_stage()
with rep.new_layer():
    # Fixed camera framing the cube; rendered at 1280x720.
    camera = rep.create.camera(position=(-30, 38, 60), look_at=(0, 0, 0))
    render_product = rep.create.render_product(camera, (1280, 720))
    # Get Scene cube
    cube_prim = stage.GetPrimAtPath("/World/RoundedCube2/Cube/Cube")
    # Set the primvars on the cubes once
    # (presumably the cube's material reads these primvars to drive the
    # scratch look -- verify against the .usda scene file).
    primvars_api = UsdGeom.PrimvarsAPI(cube_prim)
    primvars_api.CreatePrimvar("random_color", Sdf.ValueTypeNames.Float3).Set(
        (1.0, 1.0, 1.0)
    )
    primvars_api.CreatePrimvar("random_intensity", Sdf.ValueTypeNames.Float3).Set(
        (1.0, 1.0, 1.0)
    )
    def change_colors():
        """Randomize the cube's `random_color` / `random_intensity` primvars."""
        # Change color primvars
        cubes = rep.get.prims(
            path_pattern="/World/RoundedCube2/Cube/Cube", prim_types=["Mesh"]
        )
        with cubes:
            # Color components sampled uniformly in [0, 1].
            rep.modify.attribute(
                "primvars:random_color",
                rep.distribution.uniform((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
                attribute_type="float3",
            )
            # Intensity components sampled uniformly in [0, 10].
            rep.modify.attribute(
                "primvars:random_intensity",
                rep.distribution.uniform((0.0, 0.0, 0.0), (10.0, 10.0, 10.0)),
                attribute_type="float3",
            )
        return cubes.node
    rep.randomizer.register(change_colors)
    # Setup randomization of colors, different each frame
    with rep.trigger.on_frame(num_frames=10):
        rep.randomizer.change_colors()
    # (optional) Write output images to disk
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="~/replicator_examples/box_scratches",
        rgb=True,
        bounding_box_2d_tight=True,
        semantic_segmentation=True,
        distance_to_image_plane=True,
    )
    writer.attach([render_product])
carb.log_info("scratches randomization complete")
| 3,884 | Python | 37.465346 | 84 | 0.697477 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/22_Change_Textures.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep

# All generated content lives on a fresh authoring layer.
with rep.new_layer():
    # Populate the scene with batches of semantically labeled primitives.
    sphere_prims = rep.create.sphere(
        semantics=[("class", "sphere")], position=(0, 100, 100), count=5
    )
    cube_prims = rep.create.cube(
        semantics=[("class", "cube")], position=(200, 200, 100), count=5
    )
    cone_prims = rep.create.cone(
        semantics=[("class", "cone")], position=(200, 400, 200), count=10
    )
    cylinder_prims = rep.create.cylinder(
        semantics=[("class", "cylinder")], position=(200, 100, 200), count=5
    )

    # Camera plus the render product capturing its view.
    cam = rep.create.camera(position=(0, 0, 1000))
    rp = rep.create.render_product(cam, (1024, 1024))

    # Ground plane (created but not otherwise used by this example).
    ground = rep.create.plane(scale=10)

    def get_shapes():
        """Find the labeled shapes and randomize their texture and pose.

        Registered below as a Replicator randomizer; returns the graph node
        so the trigger can re-run it each frame.
        """
        shapes = rep.get.prims(
            semantics=[
                ("class", "cube"),
                ("class", "sphere"),
                ("class", "cone"),
                ("class", "cylinder"),
            ]
        )
        with shapes:
            # Assign one of four ground-material textures to each shape.
            rep.randomizer.texture(
                textures=[
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/aggregate_exposed_diff.jpg",
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/gravel_track_ballast_diff.jpg",
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/gravel_track_ballast_multi_R_rough_G_ao.jpg",
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/rough_gravel_rough.jpg",
                ]
            )
            # Random placement, heading and size for every shape.
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 50, -500), (500, 50, 500)),
                rotation=rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
                scale=rep.distribution.normal(1, 0.5),
            )
        return shapes.node

    rep.randomizer.register(get_shapes)

    # 100 variations: one randomization per captured frame.
    with rep.trigger.on_frame(num_frames=100):
        rep.randomizer.get_shapes()

    # Write RGB images to disk.
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir="~/replicator_examples/dli_example_22", rgb=True)
    writer.attach([rp])

    rep.orchestrator.run()
| 4,321 | Python | 43.556701 | 134 | 0.672992 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/03_replicator_advanced.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep
with rep.new_layer():
    def dome_lights():
        """Create a dome light with one of two indoor HDRI skies chosen at random."""
        lights = rep.create.light(
            light_type="Dome",
            rotation=(270, 0, 0),
            texture=rep.distribution.choice(
                [
                    "omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCGcom_ExhibitionHall_Interior1.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCG_com_WarehouseInterior2b.hdr",
                ]
            ),
        )
        return lights.node
    rep.randomizer.register(dome_lights)
    # Nucleus folder containing the conference-table USD assets.
    conference_tables = (
        "omniverse://localhost/NVIDIA/Assets/ArchVis/Commercial/Conference/"
    )
    # create randomizer function conference table assets.
    # This randomization includes placement and rotation of the assets on the surface.
    def env_conference_table(size=5):
        """Instantiate `size` conference-table assets and scatter them.

        Tables are placed uniformly in X/Z around the origin; the rotation
        range keeps X fixed at -90 degrees while varying the turn in Y.
        """
        confTable = rep.randomizer.instantiate(
            rep.utils.get_usd_files(conference_tables, recursive=False),
            size=size,
            mode="scene_instance",
        )
        with confTable:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 0, -500), (500, 0, 500)),
                rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
            )
        return confTable.node
    # Register randomization
    rep.randomizer.register(env_conference_table)
    # Setup camera and attach it to render product
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))
    # Invisible disk used only as a look-at target for the camera.
    surface = rep.create.disk(scale=100, visible=False)
    # trigger on frame for an interval
    with rep.trigger.on_frame(5):
        # Two tables and a fresh dome light per trigger, camera re-aimed each time.
        rep.randomizer.env_conference_table(2)
        rep.randomizer.dome_lights()
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 200, 1000), (500, 500, 1500)),
                look_at=surface,
            )
    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir="~/replicator_examples/dli_example_3", rgb=True)
    writer.attach([render_product])
    # NOTE(review): no rep.orchestrator.run()/preview() here -- presumably
    # generation is started manually (e.g. from the Replicator menu); confirm.
| 3,860 | Python | 40.074468 | 110 | 0.688083 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/01_hello_replicator.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep

# Author everything on a fresh layer -- a good habit for working with
# existing Usd scenes later on.
with rep.new_layer():
    # Camera positioned above and behind the origin, aimed at it.
    cam = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))

    # Simple labeled shapes to manipulate.
    ground = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    torus_prim = rep.create.torus(semantics=[("class", "torus")], position=(200, 0, 100))
    sphere_prim = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 0, 100))
    cube_prim = rep.create.cube(semantics=[("class", "cube")], position=(-200, 0, 100))

    # Randomize position and scale of the three shapes on every frame.
    with rep.trigger.on_frame(num_frames=10):
        # Group the shapes so one modify.pose call affects all of them.
        with rep.create.group([torus_prim, sphere_prim, cube_prim]):
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )

    # Render product plus a writer producing RGB, semantic segmentation and
    # tight 2D bounding boxes.
    rp = rep.create.render_product(cam, (1024, 1024))
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="~/replicator_examples/dli_hello_replicator/",
        rgb=True,
        semantic_segmentation=True,
        bounding_box_2d_tight=True,
    )
    writer.attach([rp])
    rep.orchestrator.run()
| 3,261 | Python | 46.970588 | 92 | 0.726771 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/physics.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep
with rep.new_layer():
    # Define paths for the character, the props, the environment and the surface where the assets will be scattered in.
    PROPS = "omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Props/YCB/Axis_Aligned_Physics"
    SURFACE = (
        "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Basic/display_riser.usd"
    )
    ENVS = "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Interior/ZetCG_ExhibitionHall.usd"
    # Define randomizer function for Base assets. This randomization includes placement and rotation of the assets on the surface.
    def env_props(size=50):
        """Instantiate `size` YCB props above the surface and make them rigid bodies."""
        instances = rep.randomizer.instantiate(
            rep.utils.get_usd_files(PROPS, recursive=True),
            size=size,
            mode="scene_instance",
        )
        with instances:
            # Spawn in a volume above the riser so the props fall onto it.
            rep.modify.pose(
                position=rep.distribution.uniform((-50, 5, -50), (50, 20, 50)),
                rotation=rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
                scale=100,
            )
            # Zero linear velocity; angular velocity random in [-100, 0] on
            # the third axis only. (The `-0` bounds are equivalent to 0.)
            rep.physics.rigid_body(
                velocity=rep.distribution.uniform((-0, 0, -0), (0, 0, 0)),
                angular_velocity=rep.distribution.uniform((-0, 0, -100), (0, 0, 0)),
            )
        return instances.node
    # Register randomization
    rep.randomizer.register(env_props)
    # Setup the static elements
    env = rep.create.from_usd(ENVS)
    surface = rep.create.from_usd(SURFACE)
    with surface:
        # Collider so the falling props land on the riser instead of passing through.
        rep.physics.collider()
    # Setup camera and attach it to render product
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))
    # sphere lights for extra randomization
    def sphere_lights(num):
        """Create `num` sphere lights with random temperature, intensity, position and size."""
        lights = rep.create.light(
            light_type="Sphere",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(35000, 5000),
            position=rep.distribution.uniform((-300, -300, -300), (300, 300, 300)),
            scale=rep.distribution.uniform(50, 100),
            count=num,
        )
        return lights.node
    rep.randomizer.register(sphere_lights)
    # trigger on frame for an interval
    # (every 2 seconds, 10 times: fresh props, fresh lights, camera re-aimed)
    with rep.trigger.on_time(interval=2, num=10):
        rep.randomizer.env_props(10)
        rep.randomizer.sphere_lights(10)
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-50, 20, 100), (50, 50, 150)),
                look_at=surface,
            )
    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="~/replicator_examples/dli_physics",
        rgb=True,
        bounding_box_2d_tight=True,
    )
    writer.attach([render_product])
| 4,509 | Python | 41.952381 | 130 | 0.681082 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/02_background_randomization.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep
with rep.new_layer():
    def dome_lights():
        """Create a dome light with one of four sky HDRIs chosen at random."""
        lights = rep.create.light(
            light_type="Dome",
            rotation=(270, 0, 0),
            texture=rep.distribution.choice(
                [
                    "omniverse://localhost/NVIDIA/Assets/Skies/Cloudy/champagne_castle_1_4k.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Clear/evening_road_01_4k.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Cloudy/kloofendal_48d_partly_cloudy_4k.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Clear/qwantani_4k.hdr",
                ]
            ),
        )
        return lights.node
    rep.randomizer.register(dome_lights)
    # Torus whose pose is randomized on each trigger.
    torus = rep.create.torus(semantics=[("class", "torus")], position=(0, -200, 100))
    # create surface
    # (invisible disk used only as a look-at target for the camera)
    surface = rep.create.disk(scale=5, visible=False)
    # create camera & render product for the scene
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))
    # New background, torus pose and camera position on each trigger.
    with rep.trigger.on_frame(num_frames=10, interval=10):
        rep.randomizer.dome_lights()
        with rep.create.group([torus]):
            rep.modify.pose(
                position=rep.distribution.uniform((-100, -100, -100), (200, 200, 200)),
                scale=rep.distribution.uniform(0.1, 2),
            )
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 200, 1000), (500, 500, 1500)),
                look_at=surface,
            )
    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir="~/replicator_examples/dli_example_02", rgb=True)
    writer.attach([render_product])
    # Run Replicator
    # (deliberately left commented out -- uncomment to start generation from
    # the script, or trigger it manually from the Replicator menu)
    # rep.orchestrator.run()
| 3,520 | Python | 41.421686 | 107 | 0.685227 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/33_replicator_solution.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import omni.replicator.core as rep
with rep.new_layer():
def dome_lights():
lights = rep.create.light(
light_type="Dome",
rotation=(270, 0, 0),
texture=rep.distribution.choice(
[
"omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCGcom_ExhibitionHall_Interior1.hdr",
"omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCG_com_WarehouseInterior2b.hdr",
]
),
)
return lights.node
rep.randomizer.register(dome_lights)
bar_stools = (
"omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Furniture/BarStools/"
)
reception_tables = (
"omniverse://localhost/NVIDIA/Assets/ArchVis/Commercial/Reception/"
)
chairs = "omniverse://localhost/NVIDIA/Assets/ArchVis/Commercial/Seating/"
normal_tables = "omniverse://localhost/NVIDIA/Assets/ArchVis/Commercial/Tables/"
sofa_set = "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Furniture/FurnitureSets/Dutchtown/"
# create randomizer function conference table assets.
# This randomization includes placement and rotation of the assets on the surface.
def env_bar_stools(size=5):
barstool = rep.randomizer.instantiate(
rep.utils.get_usd_files(bar_stools, recursive=False),
size=size,
mode="scene_instance",
)
rep.modify.semantics([("class", "chair_barstool")])
with barstool:
rep.modify.pose(
position=rep.distribution.uniform((-500, 0, -500), (500, 0, 500)),
rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
)
return barstool.node
# Register randomization
rep.randomizer.register(env_bar_stools)
# create randomizer function conference table assets.
# This randomization includes placement and rotation of the assets on the surface.
def env_reception_table(size=5):
receptTable = rep.randomizer.instantiate(
rep.utils.get_usd_files(reception_tables, recursive=False),
size=size,
mode="scene_instance",
)
rep.modify.semantics([("class", "reception_table")])
with receptTable:
rep.modify.pose(
position=rep.distribution.uniform((-500, 0, -500), (500, 0, 500)),
rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
)
return receptTable.node
# Register randomization
rep.randomizer.register(env_reception_table)
# Create a randomizer function for office chair assets.
# This randomization includes placement and rotation of the assets on the surface.
def env_chairs_notable(size=10):
    """Instantiate office chair assets as scene instances and scatter them.

    Args:
        size: Number of chair assets to instantiate (default 10).

    Returns:
        The replicator node of the instantiated group, used by the
        registered randomizer graph.
    """
    chairsEnv = rep.randomizer.instantiate(
        rep.utils.get_usd_files(chairs, recursive=False),
        size=size,
        mode="scene_instance",
    )
    with chairsEnv:
        # Semantics and pose must be modified inside the `with` context so
        # they target the instantiated chair prims.
        rep.modify.semantics([("class", "office_chairs")])
        rep.modify.pose(
            position=rep.distribution.uniform((-500, 0, -500), (500, 0, 500)),
            rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
        )
    return chairsEnv.node
# Register randomization
rep.randomizer.register(env_chairs_notable)
# Create a randomizer function for table assets.
# This randomization includes placement and rotation of the assets on the surface.
def env_normal_table(size=5):
    """Instantiate table assets as scene instances and scatter them.

    Args:
        size: Number of table assets to instantiate (default 5).

    Returns:
        The replicator node of the instantiated group, used by the
        registered randomizer graph.
    """
    normTable = rep.randomizer.instantiate(
        rep.utils.get_usd_files(normal_tables, recursive=False),
        size=size,
        mode="scene_instance",
    )
    with normTable:
        # Semantics and pose must be modified inside the `with` context so
        # they target the instantiated table prims.
        rep.modify.semantics([("class", "normal_table")])
        rep.modify.pose(
            position=rep.distribution.uniform((-500, 0, -500), (500, 0, 500)),
            rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
        )
    return normTable.node
# Register randomization
rep.randomizer.register(env_normal_table)
# Create a randomizer function for sofa set assets.
# This randomization includes placement and rotation of the assets on the surface.
def env_sofaset(size=5):
    """Instantiate sofa set assets as scene instances and scatter them.

    Args:
        size: Number of sofa set assets to instantiate (default 5).

    Returns:
        The replicator node of the instantiated group, used by the
        registered randomizer graph.
    """
    sofaset = rep.randomizer.instantiate(
        rep.utils.get_usd_files(sofa_set, recursive=False),
        size=size,
        mode="scene_instance",
    )
    with sofaset:
        # Semantics and pose must be modified inside the `with` context so
        # they target the instantiated sofa prims.
        rep.modify.semantics([("class", "sofa")])
        rep.modify.pose(
            position=rep.distribution.uniform((-500, 0, -500), (500, 0, 500)),
            rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
        )
    return sofaset.node
# Register randomization
rep.randomizer.register(env_sofaset)
# Setup camera and attach it to render product
camera = rep.create.camera()
# Render product captures the camera view at 1024x1024 for the dataset writer.
render_product = rep.create.render_product(camera, resolution=(1024, 1024))
# sphere lights for bonus challenge randomization
def sphere_lights(num):
    """Create `num` sphere lights with randomized color temperature,
    intensity, position and scale, returning their replicator node."""
    # Name the distributions first so each randomized property is explicit.
    temperature_dist = rep.distribution.normal(6500, 500)
    intensity_dist = rep.distribution.normal(35000, 5000)
    position_dist = rep.distribution.uniform((-300, -300, -300), (300, 300, 300))
    scale_dist = rep.distribution.uniform(50, 100)
    light_group = rep.create.light(
        light_type="Sphere",
        temperature=temperature_dist,
        intensity=intensity_dist,
        position=position_dist,
        scale=scale_dist,
        count=num,
    )
    return light_group.node
rep.randomizer.register(sphere_lights)
# create the surface for the camera to focus on
surface = rep.create.disk(scale=100, visible=False)
# trigger on frame for an interval
with rep.trigger.on_frame(5):
    # On each trigger, scatter two of each furniture class and
    # re-randomize the dome light texture.
    rep.randomizer.env_bar_stools(2)
    rep.randomizer.env_reception_table(2)
    rep.randomizer.env_chairs_notable(2)
    rep.randomizer.env_normal_table(2)
    rep.randomizer.env_sofaset(2)
    # rep.randomizer.sphere_lights(10)
    rep.randomizer.dome_lights()
    with camera:
        # Re-pose the camera each trigger, always aiming at the invisible
        # focus surface created above.
        rep.modify.pose(
            position=rep.distribution.uniform((-500, 200, 1000), (500, 500, 1500)),
            look_at=surface,
        )
# Initialize and attach writer
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir="~/replicator_examples/dli_example_33", rgb=True)
writer.attach([render_product])
| 8,087 | Python | 39.848485 | 110 | 0.650921 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_materials.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Creating materials, and applying them to objects in a scene with a randomizer
# Create the materials to apply to the objects
mats:
create.material_omnipbr:
diffuse:
distribution.uniform:
lower: [0, 0, 0]
upper: [1, 1, 1]
count: 100
# Create the objects in the scene
spheres:
create.sphere:
scale: 0.2
position:
distribution.uniform:
lower: [-1, -1, -1]
upper: [1, 1, 1]
count: 100
plane:
create.plane:
semantics: [["class", "plane"]]
position: [0, 0, -1.5]
visible: true
scale: 100
# Create the camera and render product
camera:
create.camera:
position: [5, 0, 0]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialRandomizerMaterials/"
rgb: True
writer_attach:
writer.attach:
render_products: render_product
# Register a randomizer that sets the materials of the spheres
register_materials:
randomizer.register:
get_spheres:
inputs:
spheres: null
mats: null
with.spheres:
randomizer.materials:
materials: mats
# Set the trigger as on_frame, setting subframes to accumulate frames for a
# higher quality render
trigger:
trigger.on_frame:
max_execs: 20
rt_subframes: 3
# When the trigger executes, apply the randomizer
with_trigger:
with.trigger:
randomizer.get_spheres:
spheres: spheres
mats: mats
| 3,700 | YAML | 29.841666 | 84 | 0.721892 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_scatter_surface.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Scattering objects on a plane
# Create the objects in the scene
light:
create.light:
light_type: distant
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [1, 0, 1]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [1, 2, 2]
plane:
create.plane:
semantics: [["class", "plane"]]
visible: true
scale: 10
# Create the camera and render product
camera:
create.camera:
position: [13, -6, 9]
rotation: [0, -35, -27]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialRandomizerScatterSurface/"
rgb: True
writer_attach:
writer.attach:
render_products: render_product
# Register a randomizer that scatters objects on a plane
register_scatter2d:
randomizer.register:
get_scatter2d:
inputs:
input_plane: null
shapes:
get.prims:
semantics: [['class', 'cube'], ['class', 'sphere']]
with.shapes:
randomizer.scatter_2d:
surface_prims: [input_plane]
# Set the trigger as on_frame, setting subframes to accumulate frames for a
# higher quality render
trigger:
trigger.on_frame:
max_execs: 20
rt_subframes: 12
# When the trigger executes, apply the randomizer
with_trigger:
with.trigger:
randomizer.get_scatter2d:
input_plane: plane
| 3,607 | YAML | 30.373913 | 84 | 0.724147 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_dome_light.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Randomizing the texture of a dome light
# Create the camera and render product
camera:
create.camera:
position: [5, 0, 0]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialRandomizerDomeLight/"
rgb: True
render_products: render_product
# Register a randomizer that sets the HDR texture of a domelight
register_dome_light:
randomizer.register:
dome_lights:
dome_light:
create.light:
light_type: "Dome"
rotation: [0, 0, 90]
texture:
distribution.sequence:
items:
- "omniverse://localhost/NVIDIA/Assets/Skies/Cloudy/champagne_castle_1_4k.hdr"
- "omniverse://localhost/NVIDIA/Assets/Skies/Clear/evening_road_01_4k.hdr"
- "omniverse://localhost/NVIDIA/Assets/Skies/Clear/mealie_road_4k.hdr"
- "omniverse://localhost/NVIDIA/Assets/Skies/Clear/qwantani_4k.hdr"
# Set the trigger as on_frame
trigger:
trigger.on_frame:
max_execs: 10
interval: 1
# When the trigger executes, apply the randomizer
with_trigger:
with.trigger:
randomizer.dome_lights: null
| 3,415 | YAML | 35.731182 | 94 | 0.727965 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_basic_functionality.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Moving a group of objects to varied positions using a distribution
# Create the camera and render product
camera:
create.camera:
position: [5, -1, 4]
rotation: [0, -30, -27]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the objects in the scene
light:
create.light:
light_type: "distant"
torus:
create.torus:
semantics: [["class", "torus"]]
position: [1, 0, -2]
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [1, 0, 1]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [1, 1, -2]
group:
create.group:
items: [torus, sphere, cube]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialBasicFunctionality/"
rgb: True
bounding_box_2d_tight: True
writer_attach:
writer.attach:
render_products: render_product
# Set the trigger as on_frame
trigger:
trigger.on_frame:
max_execs: 10
# When the trigger executes, modify the poses of the group
with_trigger:
with.trigger:
with.group:
modify.pose:
position:
distribution.uniform:
lower: [-1, -1, -1]
upper: [2, 2, 2]
scale:
distribution.uniform:
lower: 0.1
upper: 2
| 3,482 | YAML | 29.552631 | 84 | 0.71166 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_textures.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Randomizing the textures on materials of prims in the scene
# Create the objects in the scene
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [1, 0, 1]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [1, 2, 2]
count: 10
plane:
create.plane:
semantics: [["class", "plane"]]
visible: true
scale: 10
# Create a group of objects with these semantic labels
shapes:
get.prims:
semantics: [['class', 'cube'], ['class', 'sphere']]
# Create the camera and render product
camera:
create.camera:
position: [5, -1, 4]
rotation: [0, -30, -27]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialRandomizerTextures/"
rgb: True
writer_attach:
writer.attach:
render_products: render_product
# Register a randomizer that sets the textures on the materials of prims
register_shape_textures:
randomizer.register:
randomize_texture:
inputs:
prims: null
with.prims:
randomizer.texture:
textures:
- "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/aggregate_exposed_diff.jpg"
- "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/gravel_track_ballast_diff.jpg"
- "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/gravel_track_ballast_multi_R_rough_G_ao.jpg"
- "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/rough_gravel_rough.jpg"
# Set the trigger as on_frame, setting subframes to accumulate frames for a
# higher quality render
trigger:
trigger.on_frame:
max_execs: 20
rt_subframes: 3
# When the trigger executes, apply the randomizer
with_trigger:
with.trigger:
randomizer.randomize_texture:
prims: shapes
| 4,089 | YAML | 33.369748 | 127 | 0.729029 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/randomizeCameraPositionList.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Moving a camera to positions defined in a list
# but rotated to look at the center of the scene, showing a cube on a plane
# Define camera positions list
camera_positions: [[2,4,5], [-5,-5,5], [-3,4,5], [3,-2,3], [6,7,4]]
# Create the camera and render product
camera:
create.camera:
look_at: [0, 0, 0]
position: [10, 10, 10]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the objects in the scene
cube:
create.cube:
semantics: [["class", "cube"]]
position: [0, 0, 0]
plane:
create.plane:
semantics: [["class", "plane"]]
position: [0, 0, -0.75]
visible: true
scale: 10
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/RandomizeCameraPositionsList/"
rgb: True
bounding_box_2d_tight: True
render_products: render_product
# Set the trigger as on_frame
trigger:
trigger.on_frame:
max_execs: 10
# When the trigger executes, modify the poses of the camera
with_trigger:
with.trigger:
with.camera:
modify.pose:
position:
distribution.choice:
choices: camera_positions
look_at: [0,0,0]
| 3,364 | YAML | 31.669903 | 84 | 0.723543 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_light.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z"", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Randomizing light positions in a scene
# Create the objects in the scene
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [1, 0, 1]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [1, 2, 2]
plane:
create.plane:
semantics: [["class", "plane"]]
visible: true
scale: 10
# Create the camera and render product
camera:
create.camera:
position: [13, -6, 9]
rotation: [0, -35, -27]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialRandomizerLight/"
rgb: True
writer_attach:
writer.attach:
render_products: render_product
# Register a randomizer that sets light positions
register_sphere_light:
randomizer.register:
sphere_lights:
inputs:
num: 1
sphere_light:
create.light:
light_type: "Sphere"
temperature:
distribution.normal:
mean: 6500
std: 500
intensity:
distribution.normal:
mean: 35000
std: 5000
position:
distribution.uniform:
lower: [-3, -3, 2]
upper: [3, 3, 3]
scale:
distribution.uniform:
lower: 0.5
upper: 1
count: num
# Set the trigger as on_frame
trigger:
trigger.on_frame:
max_execs: 10
rt_subframes: 50
# When the trigger executes, apply the randomizer
with_trigger:
with.trigger:
randomizer.sphere_lights:
num: 10
| 3,798 | YAML | 29.637097 | 84 | 0.688784 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/multipleCamerasWithBasicWriter.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# A multi camera scenario, with 2 cameras positioned in different locations
# but rotated to look at the center of the scene, showing a cube, a sphere and a cone.
# Create the objects in the scene
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [0, 0, 0]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [2, 0, 0]
cone:
create.cone:
semantics: [["class", "cone"]]
position: [-2, 0, 0]
# Create cameras and attach render product
camera:
create.camera:
look_at: [0, 0, 0]
position: [0, 5, 1]
camera2:
create.camera:
look_at: [0, 0, 0]
position: [0, 0, 5]
render_product:
create.render_product:
camera: camera
resolution: [512, 512]
render_product2:
create.render_product:
camera: camera2
resolution: [320, 240]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/MultipleCamerasWithBasicWriter"
rgb: True
bounding_box_2d_loose: True
bounding_box_2d_tight: True
bounding_box_3d: True
distance_to_camera: True
distance_to_image_plane: True
instance_segmentation: True
normals: True
semantic_segmentation: True
writer_attach:
writer.attach:
render_products: [render_product, render_product2]
# Set the trigger as on_frame
trigger:
trigger.on_frame:
max_execs: 10
# When the trigger executes, modify the poses of the cameras
with_trigger:
with.trigger:
with.camera:
modify.pose:
position:
distribution.uniform:
lower: [-5, 1, 5]
upper: [5, 5, 5]
look_at: [0,0,0]
with.camera2:
modify.pose:
position:
distribution.uniform:
lower: [-5, 1, 2]
upper: [5, 5, 5]
look_at: [0,0,0]
| 3,974 | YAML | 30.054687 | 90 | 0.701309 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_pose.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Randomizing the pose of objects in a scene with a randomizer
# https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/randomizer_details.html#randomizing-pose-position-rotation-and-scale
# Create the objects in the scene
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [1, 0, 1]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [1, 2, 2]
# Create the camera and render product
camera:
create.camera:
position: [13, -6, 9]
rotation: [0, -35, -27]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialRandomizerPose/"
rgb: True
writer_attach:
writer.attach:
render_products: render_product
# Register a randomizer that sets the pose of the objects
register_move_shapes:
randomizer.register:
move_shapes:
shapes:
get.prims:
semantics: [['class', 'cube'], ['class', 'sphere']]
with.shapes:
modify.pose:
position:
distribution.uniform:
lower: [-5, -5, 0.5]
upper: [5, 5, 0.5]
rotation:
distribution.uniform:
lower: [0, 0, -180]
upper: [0, 0, 180]
scale:
distribution.normal:
mean: 1.0
std: 0.5
# Set the trigger as on_frame
trigger:
trigger.on_frame:
max_execs: 20
# When the trigger executes, apply the randomizer
with_trigger:
with.trigger:
randomizer.move_shapes: null
| 3,766 | YAML | 32.336283 | 137 | 0.703399 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/randomizeCameraPositionUniformly.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Moving a camera to positions using a distribution
# but rotated to look at the center of the scene, showing a cube and sphere on a plane
# Create the objects in the scene
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [0, 1, 1]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [2, 2, 1]
plane:
create.plane:
semantics: [["class", "plane"]]
visible: true
scale: 10
# Create the camera and render product
camera:
create.camera:
position: [10, 0, 0]
render_product:
create.render_product:
camera: camera
resolution: [512, 512]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/RandomizeCameraPositionUniformly/"
rgb: True
bounding_box_2d_tight: True
render_products: render_product
# Set the trigger as on_frame
trigger:
trigger.on_frame:
max_execs: 10
# When the trigger executes, modify the poses of the camera
with_trigger:
with.trigger:
with.camera:
modify.pose:
position:
distribution.uniform:
lower: [-5, 2, 1]
upper: [5, 5, 5]
look_at: [0,0,0]
| 3,331 | YAML | 32.32 | 90 | 0.724707 |
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_colors.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
settings.set_stage_meters_per_unit:
meters_per_unit: 1
# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
settings.set_stage_up_axis:
up_axis: "Z"
# This YAML script example demonstrates:
# Randomizing the colors of a cube and sphere's materials
# Create the objects in the scene
sphere:
create.sphere:
semantics: [["class", "sphere"]]
position: [1, 0, 1]
cube:
create.cube:
semantics: [["class", "cube"]]
position: [1, 2, 2]
plane:
create.plane:
semantics: [["class", "plane"]]
visible: true
scale: 10
# Create the camera and render product
camera:
create.camera:
position: [5, -1, 4]
rotation: [0, -30, -27]
render_product:
create.render_product:
camera: camera
resolution: [1024, 1024]
# Create the writer and initialize
writer:
writers.get:
name: "BasicWriter"
init_params:
output_dir: "_output_yaml/TutorialRandomizerColors/"
rgb: True
writer_attach:
writer.attach:
render_products: render_product
# Register a randomizer that sets the semantic label and randomizes the color
register_colors:
randomizer.register:
get_color:
shapes:
get.prims:
semantics: [['class', 'cube'], ['class', 'sphere']]
with.shapes:
randomizer.color:
colors:
distribution.uniform:
lower: [0, 0, 0]
upper: [1, 1, 1]
# Set the trigger as on_frame, setting subframes to accumulate frames for a
# higher quality render
trigger:
trigger.on_frame:
max_execs: 20
rt_subframes: 10
# When the trigger executes, apply the randomizer
with_trigger:
with.trigger:
randomizer.get_color: null
| 3,596 | YAML | 31.405405 | 84 | 0.718298 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/CLA.md | ## Individual Contributor License Agreement (CLA)
**Thank you for submitting your contributions to this project.**
By signing this CLA, you agree that the following terms apply to all of your past, present and future contributions
to the project.
### License.
You hereby represent that all present, past and future contributions are governed by the
[MIT License](https://opensource.org/licenses/MIT)
copyright statement.
This entails that to the extent possible under law, you transfer all copyright and related or neighboring rights
of the code or documents you contribute to the project itself or its maintainers.
Furthermore you also represent that you have the authority to perform the above waiver
with respect to the entirety of you contributions.
### Moral Rights.
To the fullest extent permitted under applicable law, you hereby waive, and agree not to
assert, all of your “moral rights” in or relating to your contributions for the benefit of the project.
### Third Party Content.
If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools,
specifications, documentation, data, materials, feedback, information or other works of authorship that were not
authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary
rights associated with your Contribution (“Third Party Rights”),
then you agree to include with the submission of your Contribution full details respecting such Third Party
Content and Third Party Rights, including, without limitation, identification of which aspects of your
Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the
Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable
third party license terms or restrictions respecting the Third Party Content and Third Party Rights. For greater
certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights
do not apply to any portion of a Project that is incorporated into your Contribution to that same Project.
### Representations.
You represent that, other than the Third Party Content and Third Party Rights identified by
you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled
to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were
created in the course of your employment with your past or present employer(s), you represent that such
employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such employer
(s) has waived all of their right, title or interest in or to your Contributions.
### Disclaimer.
To the fullest extent permitted under applicable law, your Contributions are provided on an "as is"
basis, without any warranties or conditions, express or implied, including, without limitation, any implied
warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not
required to provide support for your Contributions, except to the extent you desire to provide support.
### No Obligation.
You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions
into the project. The decision to use or incorporate your contributions into the project will be made at the
sole discretion of the maintainers or their authorized delegates. | 3,543 | Markdown | 60.103447 | 117 | 0.812024 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/README.md | # Synthetic Data Generation and Training with Sim Ready Assets
This project provides a workflow for Training Computer Vision models with Synthetic Data. We will use Isaac Sim with Omniverse Replicator to generate data for our use case and objects of interest. To ensure seamless compatibility with model training, the data generated is in the KITTI format.
These steps can be followed on a Cloud/remote GPU instance or locally
## How to use this repository
- [Guide](local/README.md) for running the workflow locally
- [Guide](cloud/README.md) for running on a cloud/remote instance
## Workflow Components:
* Generating Data: Use Isaac Sim to generate data
* Training: We will use TAO toolkit, however users can train a model in a framework of their choice with data generated
### SDG
- Using the `palletjack` assets from the Warehouse Sim Ready Asset collection
- Carry out Domain Randomization in the scene with Replicator:
- Various attributes of the scene like lighting, textures, object pose and materials can be modified
- Important to generate a good quality dataset to ensure model detects objects in the real world
- Data output KITTI format
- We will use the KITTI Writer for generating annotations
- Possible to implement a custom writer (can be useful when data is expected in a certain format for your model)
- Sample generated images:
<p>
<img src="images/sample_synthetic/21.png" height="256"/>
<img src="images/sample_synthetic/653.png" height="256"/>
</p>
<p>
<img src="images/sample_synthetic/896.png" height="256"/>
<img src="images/sample_synthetic/1545.png" height="256"/>
</p>
### Training
- TAO: Outline of steps
- Generating Tfrecords
- Model training and evaluation
    - Model backbone selection
- Hyperparameters specified via `spec` file (provided with repo)
- Running inference with trained model
- Sample real world detections on LOCO dataset images:
<p>
<img src="images/real_world_results/1564562568.298206.jpg" height="256"/>
<img src="images/real_world_results/1564562843.0618184.jpg" height="256"/>
</p>
<p>
<img src="images/real_world_results/593768,3659.jpg" height="256"/>
<img src="images/real_world_results/510196244,1362.jpg" height="256"/>
</p>
<p>
<img src="images/real_world_results/1574675156.7667925.jpg" height="256"/>
<img src="images/real_world_results/426023,9672.jpg" height="256"/>
</p>
### Deployment
- Perform Optimizations: Pruning and QAT with TAO to reduce model size and improve performance
- Deploy on NVIDIA Jetson powered Robot with Isaac ROS or Deepstream
## References:
- Real world images from the [LOCO dataset](https://github.com/tum-fml/loco) are used for visualizing model performance
| 2,771 | Markdown | 36.972602 | 294 | 0.738001 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/LICENSE.md | SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: MIT
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| 1,167 | Markdown | 54.619045 | 97 | 0.796058 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/cloud/README.md | # Requirements
- Access to a cloud/remote GPU instance (workflow tested on a `g4dn` AWS EC2 instance with T4 GPU)
- Docker setup instructions are provided in the notebooks
- Entire workflow can be run in `headless` mode (SDG script and training)
## Synthetic Data Generation
- Use the Isaac Sim docker container for running the Data Generation [script](../palletjack_sdg/palletjack_datagen.sh)
- We will generate data for warehouse `palletjack` objects in KITTI format
- Follow the steps in the `cloud_sdg` notebook
- This generated data can be used to train your own model (framework and architecture of your choice), in this workflow we demonstrate using TAO for training
## Training with TAO Toolkit
- The `training/cloud_train` notebook provides a walkthrough of the steps:
- Setting up TAO docker container
- Downloading pre-trained model, we will use the `DetectNet_v2` model with a `resnet_18` backbone
- Running TAO training with `spec` files provided
- Visualizing model performance on real world data
- Visualize model metric with Tensorboard
<img src="../images/tensorboard/tensorboard_resized_palletjack.png"/>
## Next steps
### Generating Synthetic Data for your use case
- Make changes in the Domain Randomization under the Synthetic Data Generation [script](../palletjack_sdg/standalone_palletjack_sdg.py)
- Add additional objects of interest in the scene (similar to how palletjacks are added, you can add forklifts, ladders etc.) to generate data. Use different models for training with TAO (for object detection, you can use YOLO, SSD, EfficientDet)
- Replicator provides Semantic Segmentation, Instance Segmentation, Depth and various other ground truth annotations along with RGB. You can also write your own ground truth annotator (eg: Pose Estimation: Refer to [sample](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_replicator_offline_pose_estimation.html) These can be used for training a model of your own framework and choice)
- Exploring the option of using Synthetic + Real data for training a network. Can be particularly useful for generating more data around particular corner cases
### Deploying Trained Models
- The trained model can be pruned and optimized for inference with TAO
- This can then be deployed on a robot with NVIDIA Jetson | 2,308 | Markdown | 66.911763 | 396 | 0.786395 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/local/README.md | # Requirements
- Install [Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/install_workstation.html)
- Training via TAO Toolkit Docker container (TAO setup instructions in `local_train` notebook)
## Synthetic Data Generation
- Provide the path of your Isaac Sim installation folder in the `generate_data.sh` script
- Make the script an executable after adding the Isaac Sim Path (`chmod +x generate_data.sh`)
- Run the script (`./generate_data.sh`)
- We will generate data for the `palletjack` class of objects with annotations in KITTI format
- This generated data can be used to train your own model (framework and architecture of your choice)
## Training with TAO Toolkit
- The data generated in the previous step can be directly fed to TAO for training
- The `local_train` notebook provides a walkthrough of the steps:
- Setting up TAO docker container
- Downloading pre-trained model, we will use the `DetectNet_v2` model with a `resnet_18` backbone
- Running TAO training with `spec` files provided
- Visualizing model performance on real world data
- Visualize model metric with Tensorboard
<img src="../images/tensorboard/tensorboard_resized_palletjack.png"/>
## Next steps
### Generating Synthetic Data for your use case
- Make changes in the Domain Randomization under the Synthetic Data Generation [script](../palletjack_sdg/standalone_palletjack_sdg.py)
- Add additional objects of interest in the scene (similar to how palletjacks are added, you can add forklifts, ladders etc.) to generate data. Use different models for training with TAO (for object detection, you can use YOLO, SSD, EfficientDet)
- Replicator provides Semantic Segmentation, Instance Segmentation, Depth and various other ground truth annotations along with RGB. You can also write your own ground truth annotator (eg: Pose Estimation: Refer to [sample](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_replicator_offline_pose_estimation.html) These can be used for training a model of your own framework and choice)
- Exploring the option of using Synthetic + Real data for training a network. Can be particularly useful for generating more data around particular corner cases
### Deploying Trained Models
- The trained model can be pruned and optimized for inference with TAO
- This can then be deployed on a robot with NVIDIA Jetson | 2,370 | Markdown | 66.742855 | 396 | 0.7827 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/palletjack_sdg/standalone_palletjack_sdg.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from omni.isaac.kit import SimulationApp
import os
import argparse
def _str_to_bool(value):
    """Parse a boolean command-line value.

    argparse's ``type=bool`` is a well-known pitfall: ``bool(s)`` is True for
    ANY non-empty string, so ``--headless False`` would still enable headless
    mode. This helper accepts the usual truthy spellings and treats everything
    else (including "False"/"0") as False, while remaining backward compatible
    with callers that pass "True".
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("true", "1", "yes", "y")


# Command-line options controlling the synthetic-data-generation run.
parser = argparse.ArgumentParser("Dataset generator")
parser.add_argument("--headless", type=_str_to_bool, default=False, help="Launch script headless, default is False")
parser.add_argument("--height", type=int, default=544, help="Height of image")
parser.add_argument("--width", type=int, default=960, help="Width of image")
parser.add_argument("--num_frames", type=int, default=1000, help="Number of frames to record")
parser.add_argument("--distractors", type=str, default="warehouse",
                    help="Options are 'warehouse' (default), 'additional' or None")
parser.add_argument("--data_dir", type=str, default=os.getcwd() + "/_palletjack_data",
                    help="Location where data will be output")
# parse_known_args so extra Kit/Omniverse launcher arguments do not abort the script.
args, unknown_args = parser.parse_known_args()

# This is the config used to launch simulation.
CONFIG = {"renderer": "RayTracedLighting", "headless": args.headless,
          "width": args.width, "height": args.height, "num_frames": args.num_frames}

# Launch the simulation app with the requested settings (the omni.* imports
# further down in the file occur only after this object exists).
simulation_app = SimulationApp(launch_config=CONFIG)
## This is the path which has the background scene in which objects will be added.
ENV_URL = "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
# NOTE(review): these omni.*/pxr imports are deliberately placed after the
# SimulationApp creation above; presumably the Kit runtime must be running
# before they can be imported — keep this ordering.
import carb
import omni
import omni.usd
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import get_current_stage, open_stage
from pxr import Semantics
import omni.replicator.core as rep
from omni.isaac.core.utils.semantics import get_semantics
# Increase subframes if shadows/ghosting appears of moving objects
# See known issues: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator.html#known-issues
rep.settings.carb_settings("/omni/replicator/RTSubframes", 4)
# This is the location of the palletjacks in the simready asset library
# (three SimReady pallet-jack variants hosted on the public Omniverse content S3 bucket).
PALLETJACKS = ["http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Scale_A/PalletTruckScale_A01_PR_NVD_01.usd",
               "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Heavy_Duty_A/HeavyDutyPalletTruck_A01_PR_NVD_01.usd",
               "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Low_Profile_A/LowProfilePalletTruck_A01_PR_NVD_01.usd"]
# The warehouse distractors which will be added to the scene and randomized
# NOTE(review): the list is doubled with "2 *" and some entries repeat
# (e.g. SM_BarelPlastic_B_01, SM_BottlePlasticA_02); presumably intentional so
# those assets are sampled more often during randomization — confirm.
DISTRACTORS_WAREHOUSE = 2 * ["/Isaac/Environments/Simple_Warehouse/Props/S_TrafficCone.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/S_WetFloorSign.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_01.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_03.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_01.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_01.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_03.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_C_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticB_01.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticD_01.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticE_01.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_BucketPlastic_B.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1262.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1268.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1482.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1683.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_291.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxD_01_1454.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxD_01_1513.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_A_04.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_B_03.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_B_05.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_C_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_E_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_PushcartA_02.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_RackPile_04.usd",
                             "/Isaac/Environments/Simple_Warehouse/Props/SM_RackPile_03.usd"]
## Additional distractors which can be added to the scene
# Hospital/Office props used when the script is run with --distractors additional.
DISTRACTORS_ADDITIONAL = ["/Isaac/Environments/Hospital/Props/Pharmacy_Low.usd",
                          "/Isaac/Environments/Hospital/Props/SM_BedSideTable_01b.usd",
                          "/Isaac/Environments/Hospital/Props/SM_BooksSet_26.usd",
                          "/Isaac/Environments/Hospital/Props/SM_BottleB.usd",
                          "/Isaac/Environments/Hospital/Props/SM_BottleA.usd",
                          "/Isaac/Environments/Hospital/Props/SM_BottleC.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Cart_01a.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Chair_02a.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Chair_01a.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Computer_02b.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Desk_04a.usd",
                          "/Isaac/Environments/Hospital/Props/SM_DisposalStand_02.usd",
                          "/Isaac/Environments/Hospital/Props/SM_FirstAidKit_01a.usd",
                          "/Isaac/Environments/Hospital/Props/SM_GasCart_01c.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Gurney_01b.usd",
                          "/Isaac/Environments/Hospital/Props/SM_HospitalBed_01b.usd",
                          "/Isaac/Environments/Hospital/Props/SM_MedicalBag_01a.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Mirror.usd",
                          "/Isaac/Environments/Hospital/Props/SM_MopSet_01b.usd",
                          "/Isaac/Environments/Hospital/Props/SM_SideTable_02a.usd",
                          "/Isaac/Environments/Hospital/Props/SM_SupplyCabinet_01c.usd",
                          "/Isaac/Environments/Hospital/Props/SM_SupplyCart_01e.usd",
                          "/Isaac/Environments/Hospital/Props/SM_TrashCan.usd",
                          "/Isaac/Environments/Hospital/Props/SM_Washbasin.usd",
                          "/Isaac/Environments/Hospital/Props/SM_WheelChair_01a.usd",
                          "/Isaac/Environments/Office/Props/SM_WaterCooler.usd",
                          "/Isaac/Environments/Office/Props/SM_TV.usd",
                          "/Isaac/Environments/Office/Props/SM_TableC.usd",
                          "/Isaac/Environments/Office/Props/SM_Recliner.usd",
                          "/Isaac/Environments/Office/Props/SM_Personenleitsystem_Red1m.usd",
                          "/Isaac/Environments/Office/Props/SM_Lamp02_162.usd",
                          "/Isaac/Environments/Office/Props/SM_Lamp02.usd",
                          "/Isaac/Environments/Office/Props/SM_HandDryer.usd",
                          "/Isaac/Environments/Office/Props/SM_Extinguisher.usd"]
# The textures which will be randomized for the wall and floor
# NOTE(review): several textures repeat (nv_brick_reclaimed, nv_granite_tile,
# nv_marble, nv_rubber_woven_charcoal, ...); presumably intentional to weight
# those textures more heavily in the randomization — confirm.
TEXTURES = ["/Isaac/Materials/Textures/Patterns/nv_asphalt_yellow_weathered.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_tile_hexagonal_green_white.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_rubber_woven_charcoal.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_granite_tile.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_tile_square_green.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_marble.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_brick_reclaimed.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_concrete_aged_with_lines.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_wooden_wall.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_stone_painted_grey.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_wood_shingles_brown.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_tile_hexagonal_various.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_carpet_abstract_pattern.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_wood_siding_weathered_green.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_animalfur_pattern_greys.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_artificialgrass_green.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_bamboo_desktop.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_brick_reclaimed.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_brick_red_stacked.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_fireplace_wall.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_fabric_square_grid.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_granite_tile.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_marble.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_gravel_grey_leaves.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_plastic_blue.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_stone_red_hatch.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_stucco_red_painted.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_rubber_woven_charcoal.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_stucco_smooth_blue.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_wood_shingles_brown.jpg",
            "/Isaac/Materials/Textures/Patterns/nv_wooden_wall.jpg"]
def update_semantics(stage, keep_semantics=[]):
    """Remove semantics from the stage except for keep_semantic classes.

    Args:
        stage: USD stage whose prims are traversed.
        keep_semantics: list of semantic data (class) values to preserve.
    """
    for prim in stage.Traverse():
        if prim.HasAPI(Semantics.SemanticsAPI):
            processed_instances = set()
            for property in prim.GetProperties():
                is_semantic = Semantics.SemanticsAPI.IsSemanticsAPIPath(property.GetPath())
                if is_semantic:
                    instance_name = property.SplitName()[1]
                    if instance_name in processed_instances:
                        # Skip repeated instance, instances are iterated twice due to their two semantic properties (class, data)
                        continue
                    processed_instances.add(instance_name)
                    sem = Semantics.SemanticsAPI.Get(prim, instance_name)
                    type_attr = sem.GetSemanticTypeAttr()
                    data_attr = sem.GetSemanticDataAttr()
                    # BUG FIX: the original looped over keep_semantics and removed
                    # the property as soon as one class did not match, which broke
                    # whenever keep_semantics held more than one class (and
                    # attempted repeated removals). Remove only when the semantic
                    # data matches none of the kept classes.
                    if data_attr.Get() not in keep_semantics:
                        prim.RemoveProperty(type_attr.GetName())
                        prim.RemoveProperty(data_attr.GetName())
                        prim.RemoveAPI(Semantics.SemanticsAPI, instance_name)
# needed for loading textures correctly
def prefix_with_isaac_asset_server(relative_path):
    """Prepend the Isaac Sim asset-server (Nucleus) root to a relative asset path.

    Raises:
        Exception: if the Nucleus server / assets root cannot be located.
    """
    root = get_assets_root_path()
    if root is None:
        raise Exception("Nucleus server not found, could not access Isaac Sim assets folder")
    return root + relative_path
def full_distractors_list(distractor_type="warehouse"):
    """Distractor type allowed are warehouse, additional or None. They load corresponding objects and add
    them to the scene for DR"""
    if distractor_type == "warehouse":
        selected_assets = DISTRACTORS_WAREHOUSE
    elif distractor_type == "additional":
        selected_assets = DISTRACTORS_ADDITIONAL
    else:
        print("No Distractors being added to the current scene for SDG")
        selected_assets = []
    # Resolve each relative asset path against the Nucleus asset root.
    return [prefix_with_isaac_asset_server(asset) for asset in selected_assets]
def full_textures_list():
    """Return every TEXTURES entry resolved against the Nucleus asset root."""
    return [prefix_with_isaac_asset_server(texture_path) for texture_path in TEXTURES]
def add_palletjacks():
    """Spawn two instances of every pallet jack USD (labelled "palletjack") and group them."""
    jack_nodes = []
    for usd_path in PALLETJACKS:
        node = rep.create.from_usd(usd_path, semantics=[("class", "palletjack")], count=2)
        jack_nodes.append(node)
    return rep.create.group(jack_nodes)
def add_distractors(distractor_type="warehouse"):
    """Spawn one instance of each distractor asset of the given type and group them."""
    nodes = []
    for asset_path in full_distractors_list(distractor_type):
        nodes.append(rep.create.from_usd(asset_path, count=1))
    return rep.create.group(nodes)
# This will handle replicator
def run_orchestrator():
    """Start the Replicator orchestrator and block until data generation completes."""
    rep.orchestrator.run()
    # Wait until started
    while not rep.orchestrator.get_is_started():
        simulation_app.update()
    # Wait until stopped
    while rep.orchestrator.get_is_started():
        simulation_app.update()
    # Flush any queued writer output before stopping the orchestrator.
    rep.BackendDispatch.wait_until_done()
    rep.orchestrator.stop()
def main():
    """Load the warehouse stage, randomize it with Replicator, and write annotated frames."""
    # Open the environment in a new stage
    print(f"Loading Stage {ENV_URL}")
    open_stage(prefix_with_isaac_asset_server(ENV_URL))
    stage = get_current_stage()
    # Run some app updates to make sure things are properly loaded
    for i in range(100):
        if i % 10 == 0:
            # FIX: corrected "uppdate" typo in the progress message.
            print(f"App update {i}..")
        simulation_app.update()
    textures = full_textures_list()
    rep_palletjack_group = add_palletjacks()
    rep_distractor_group = add_distractors(distractor_type=args.distractors)
    # We only need labels for the palletjack objects
    update_semantics(stage=stage, keep_semantics=["palletjack"])
    # Create camera with Replicator API for gathering data
    cam = rep.create.camera(clipping_range=(0.1, 1000000))
    # trigger replicator pipeline
    with rep.trigger.on_frame(num_frames=CONFIG["num_frames"]):
        # Move the camera around in the scene, focus on the center of warehouse
        with cam:
            rep.modify.pose(position=rep.distribution.uniform((-9.2, -11.8, 0.4), (7.2, 15.8, 4)),
                            look_at=(0, 0, 0))
        # Get the Palletjack body mesh and modify its color
        with rep.get.prims(path_pattern="SteerAxles"):
            rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
        # Randomize the pose of all the added palletjacks
        with rep_palletjack_group:
            rep.modify.pose(position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)),
                            rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)),
                            scale=rep.distribution.uniform((0.01, 0.01, 0.01), (0.01, 0.01, 0.01)))
        # Modify the pose of all the distractors in the scene
        with rep_distractor_group:
            rep.modify.pose(position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)),
                            rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)),
                            scale=rep.distribution.uniform(1, 1.5))
        # Randomize the lighting of the scene
        with rep.get.prims(path_pattern="RectLight"):
            rep.modify.attribute("color", rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
            rep.modify.attribute("intensity", rep.distribution.normal(100000.0, 600000.0))
            rep.modify.visibility(rep.distribution.choice([True, False, False, False, False, False, False]))
        # select floor material
        random_mat_floor = rep.create.material_omnipbr(diffuse_texture=rep.distribution.choice(textures),
                                                       roughness=rep.distribution.uniform(0, 1),
                                                       metallic=rep.distribution.choice([0, 1]),
                                                       emissive_texture=rep.distribution.choice(textures),
                                                       emissive_intensity=rep.distribution.uniform(0, 1000),)
        with rep.get.prims(path_pattern="SM_Floor"):
            rep.randomizer.materials(random_mat_floor)
        # select random wall material
        random_mat_wall = rep.create.material_omnipbr(diffuse_texture=rep.distribution.choice(textures),
                                                      roughness=rep.distribution.uniform(0, 1),
                                                      metallic=rep.distribution.choice([0, 1]),
                                                      emissive_texture=rep.distribution.choice(textures),
                                                      emissive_intensity=rep.distribution.uniform(0, 1000),)
        with rep.get.prims(path_pattern="SM_Wall"):
            rep.randomizer.materials(random_mat_wall)
    # Set up the writer
    writer = rep.WriterRegistry.get("KittiWriter")
    # output directory of writer
    output_directory = args.data_dir
    print("Outputting data to ", output_directory)
    # use writer for bounding boxes, rgb and segmentation
    writer.initialize(output_dir=output_directory,
                      omit_semantic_type=True,)
    # attach camera render products to writer so that data is outputted
    RESOLUTION = (CONFIG["width"], CONFIG["height"])
    render_product = rep.create.render_product(cam, RESOLUTION)
    writer.attach(render_product)
    # run rep pipeline
    run_orchestrator()
    simulation_app.update()
# Script entry point: log and print any failure, always close the app.
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback

        carb.log_error(f"Exception: {e}")
        traceback.print_exc()
    finally:
        simulation_app.close()
| 20,199 | Python | 52.439153 | 191 | 0.634388 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docker-compose.yml | version: '3'
services:
deployment:
build: deployment
# Access the lab with: http://127.0.0.1:8884/lab?token=nvsecuretoken2
command: jupyter lab -y --allow-root --no-browser --ip=0.0.0.0 --port=8884 --notebook-dir=/opt/project/ --NotebookApp.token='nvsecuretoken2' --NotebookApp.password='nvsecurepassword'
shm_size: '2gb'
volumes:
- './deployment/code/:/opt/project/'
- './models:/opt/models/'
ports:
- "8884:8884"
training:
build: training
# Access the lab with: http://127.0.0.1:8883/lab?token=nvsecuretoken1
command: jupyter lab -y --allow-root --no-browser --ip=0.0.0.0 --port=8883 --notebook-dir=/opt/project/ --NotebookApp.token='nvsecuretoken1' --NotebookApp.password='nvsecurepassword'
volumes:
- './training/code/:/opt/project/'
ports:
- "8883:8883"
data-generation:
build: data_generation
    # Access the lab with: http://127.0.0.1:8882/lab?token=nvsecuretoken0
command: jupyter lab -y --allow-root --no-browser --ip=0.0.0.0 --port=8882 --notebook-dir=/opt/project/ --NotebookApp.token='nvsecuretoken0' --NotebookApp.password='nvsecurepassword'
volumes:
- './data_generation/code/:/opt/project/'
ports:
- "8882:8882"
| 1,238 | YAML | 40.299999 | 186 | 0.661551 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/README.md | ## Getting started
### Install Dependencies
- [`docker-compose`](https://docs.docker.com/compose/install/)
- [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)
### Run the labs
```
bash run.sh
```
### Access the labs
Part 1: Generate synthetic data with Omniverse - [http://127.0.0.1:8882/lab?token=nvsecuretoken0](http://127.0.0.1:8882/lab?token=nvsecuretoken0) \
Part 2: Training a model with synthetic data - [http://127.0.0.1:8883/lab?token=nvsecuretoken1](http://127.0.0.1:8883/lab?token=nvsecuretoken1) \
Part 3: Deploy model to Triton - [http://127.0.0.1:8884/lab?token=nvsecuretoken2](http://127.0.0.1:8884/lab?token=nvsecuretoken2)
| 708 | Markdown | 38.388887 | 147 | 0.731638 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/training/code/visualize.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import json
import hashlib
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from optparse import OptionParser
"""
Takes in the data from a specific label id and maps it to the proper color for the bounding box
"""
def data_to_colour(data):
if isinstance(data, str):
data = bytes(data, "utf-8")
else:
data = bytes(data)
m = hashlib.sha256()
m.update(data)
key = int(m.hexdigest()[:8], 16)
r = ((((key >> 0) & 0xFF) + 1) * 33) % 255
g = ((((key >> 8) & 0xFF) + 1) * 33) % 255
b = ((((key >> 16) & 0xFF) + 1) * 33) % 255
# illumination normalization to 128
inv_norm_i = 128 * (3.0 / (r + g + b))
return (
int(r * inv_norm_i) / 255,
int(g * inv_norm_i) / 255,
int(b * inv_norm_i) / 255,
)
"""
Takes in the path to the rgb image for the background, then it takes bounding box data, the labels and the place to store the visualization. It outputs a colorized bounding box.
"""
def colorize_bbox_2d(rgb_path, data, id_to_labels, file_path):
    """Overlay colored 2D bounding boxes on the RGB image and save the figure.

    Args:
        rgb_path: path to the background RGB image.
        data: iterable of bbox records with "semanticId" and x/y min/max keys.
        id_to_labels: dict mapping the stringified bbox index to its label(s).
        file_path: output path where the visualization is saved.
    """
    rgb_img = Image.open(rgb_path)
    # One deterministic color per box, derived from its semantic id.
    colors = [data_to_colour(bbox["semanticId"]) for bbox in data]
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.imshow(rgb_img)
    for bbox_2d, color, index in zip(data, colors, range(len(data))):
        labels = id_to_labels[str(index)]
        rect = patches.Rectangle(
            xy=(bbox_2d["x_min"], bbox_2d["y_min"]),
            width=bbox_2d["x_max"] - bbox_2d["x_min"],
            height=bbox_2d["y_max"] - bbox_2d["y_min"],
            edgecolor=color,
            linewidth=2,
            label=labels,
            fill=False,
        )
        ax.add_patch(rect)
    plt.legend(loc="upper left")
    plt.savefig(file_path)
"""
Parses command line options. Requires input directory, output directory, and number for image to use.
"""
def parse_input():
    """Parse command-line options: data directory, output directory, image number."""
    usage = "usage: visualize.py [options] arg1 arg2 arg3"
    parser = OptionParser(usage)
    parser.add_option("-d", "--data_dir", dest="data_dir",
                      help="Directory location for Omniverse synthetic data")
    parser.add_option("-o", "--out_dir", dest="out_dir",
                      help="Directory location for output image")
    parser.add_option("-n", "--number", dest="number",
                      help="Number of image to use for visualization")
    # parse_args() returns the (options, args) pair the callers expect.
    return parser.parse_args()
def main():
    """Load one frame's RGB image, boxes, and labels; save a bbox visualization."""
    options, args = parse_input()
    # NOTE(review): despite the name, this is the *input* data directory.
    out_dir = options.data_dir
    rgb = "png/rgb_" + options.number + ".png"
    rgb_path = os.path.join(out_dir, rgb)
    # Tight 2D bounding boxes written by the Replicator BasicWriter.
    bbox2d_tight_file_name = "npy/bounding_box_2d_tight_" + options.number + ".npy"
    data = np.load(os.path.join(options.data_dir, bbox2d_tight_file_name))
    # Check for labels
    bbox2d_tight_labels_file_name = (
        "json/bounding_box_2d_tight_labels_" + options.number + ".json"
    )
    with open(
        os.path.join(options.data_dir, bbox2d_tight_labels_file_name), "r"
    ) as json_data:
        bbox2d_tight_id_to_labels = json.load(json_data)
    # colorize and save image
    colorize_bbox_2d(
        rgb_path,
        data,
        bbox2d_tight_id_to_labels,
        os.path.join(options.out_dir, "bbox2d_tight.png"),
    )
# Script entry point.
if __name__ == "__main__":
    main()
| 5,013 | Python | 32.651006 | 177 | 0.658687 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/training/code/export.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import torch
import torchvision
from optparse import OptionParser
def parse_input():
    """Parse command-line options: trained PyTorch model path and ONNX output directory."""
    usage = "usage: export.py [options] arg1 "
    parser = OptionParser(usage)
    parser.add_option("-d", "--pytorch_dir", dest="pytorch_dir",
                      help="Location of output PyTorch model")
    parser.add_option("-o", "--output_dir", dest="output_dir",
                      help="Export and save ONNX model to this path")
    # parse_args() returns the (options, args) pair the callers expect.
    return parser.parse_args()
def main():
    """Load the fine-tuned PyTorch detector and export it to ONNX."""
    torch.manual_seed(0)
    options, args = parse_input()

    # Load the model saved by train.py and switch to inference mode.
    model = torch.load(options.pytorch_dir)
    model.eval()

    OUTPUT_DIR = options.output_dir
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # BUG FIX: the original re-created a fresh COCO-pretrained
    # fasterrcnn_resnet50_fpn here, overwriting the fine-tuned model that was
    # just loaded — the exported ONNX file therefore contained untrained
    # weights. Export the loaded model instead.
    dummy_input = torch.rand(1, 3, 1024, 1024)
    torch.onnx.export(
        model,
        dummy_input,
        os.path.join(OUTPUT_DIR, "model.onnx"),
        opset_version=11,
        input_names=["input"],
        output_names=["output"],
    )


if __name__ == "__main__":
    main()
| 2,865 | Python | 33.119047 | 84 | 0.704363 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/training/code/train.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from PIL import Image
import os
import numpy as np
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision import transforms as T
import json
import shutil
from optparse import OptionParser
from torch.utils.tensorboard import SummaryWriter
class FruitDataset(torch.utils.data.Dataset):
    """Detection dataset over Omniverse Replicator fruit output.

    Expects (or creates) ``png/``, ``json/`` and ``npy/`` subfolders under
    ``root``; on construction, loose files in ``root`` are moved into a
    subfolder named after their extension.
    """

    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # Sort loose Replicator output files into per-extension subfolders.
        for file_ in os.listdir(root):
            name, ext = os.path.splitext(file_)
            ext = ext[1:]
            if ext == "":
                continue
            os.makedirs(os.path.join(root, ext), exist_ok=True)
            shutil.move(os.path.join(root, file_), os.path.join(root, ext, file_))
        # Sorted so image / label / box files at the same index correspond.
        self.imgs = list(sorted(os.listdir(os.path.join(root, "png"))))
        self.label = list(sorted(os.listdir(os.path.join(root, "json"))))
        self.box = list(sorted(os.listdir(os.path.join(root, "npy"))))

    def __getitem__(self, idx):
        """Return (image, target) where target holds boxes/labels/image_id/area."""
        img_path = os.path.join(self.root, "png", self.imgs[idx])
        img = Image.open(img_path).convert("RGB")

        label_path = os.path.join(self.root, "json", self.label[idx])
        # BUG FIX: the original opened os.path.join("root", label_path), which
        # only worked by accident when label_path was absolute (join discards
        # the "root" prefix then). Open the already-complete path directly.
        with open(label_path, "r") as json_data:
            json_labels = json.load(json_data)

        box_path = os.path.join(self.root, "npy", self.box[idx])
        dat = np.load(str(box_path))

        boxes = []
        labels = []
        areas = []
        for i in dat:
            obj_val = i[0]
            xmin = torch.as_tensor(np.min(i[1]), dtype=torch.float32)
            xmax = torch.as_tensor(np.max(i[3]), dtype=torch.float32)
            ymin = torch.as_tensor(np.min(i[2]), dtype=torch.float32)
            ymax = torch.as_tensor(np.max(i[4]), dtype=torch.float32)
            # Keep only non-degenerate boxes.
            if (ymax > ymin) & (xmax > xmin):
                boxes.append([xmin, ymin, xmax, ymax])
                areas.append((xmax - xmin) * (ymax - ymin))
                labels.append(json_labels.get(str(obj_val)).get("class"))

        # Map fruit class names to integer label ids.
        static_labels = {
            "apple": 0,
            "avocado": 1,
            "kiwi": 2,
            "lime": 3,
            "lychee": 4,
            "pomegranate": 5,
            "onion": 6,
            "strawberry": 7,
            "lemon": 8,
            "orange": 9,
        }
        labels_out = [static_labels[fruit] for fruit in labels]

        target = {}
        target["boxes"] = torch.as_tensor(boxes, dtype=torch.float32)
        target["labels"] = torch.as_tensor(labels_out, dtype=torch.int64)
        target["image_id"] = torch.tensor([idx])
        # BUG FIX: the original stored only the *last* box's area (and raised
        # NameError when no valid box existed); store one area per kept box.
        target["area"] = torch.as_tensor(areas, dtype=torch.float32)

        if self.transforms is not None:
            img = self.transforms(img)
        return img, target

    def __len__(self):
        return len(self.imgs)
"""
Parses command line options. Requires input data directory, output torch file, and number epochs used to train.
"""
def parse_input():
    """Parse command-line options: data directory, output model file, epoch count."""
    usage = "usage: train.py [options] arg1 arg2 "
    parser = OptionParser(usage)
    parser.add_option("-d", "--data_dir", dest="data_dir",
                      help="Directory location for Omniverse synthetic data.")
    parser.add_option("-o", "--output_file", dest="output_file",
                      help="Save torch model to this file and location (file ending in .pth)")
    parser.add_option("-e", "--epochs", dest="epochs",
                      help="Give number of epochs to be used for training")
    # parse_args() returns the (options, args) pair the callers expect.
    return parser.parse_args()
def get_transform(train):
    """Build the image transform pipeline (the ``train`` flag is currently unused)."""
    return T.Compose([T.PILToTensor(), T.ConvertImageDtype(torch.float)])
def collate_fn(batch):
    """Collate (image, target) samples into parallel tuples of images and targets."""
    transposed = zip(*batch)
    return tuple(transposed)
def create_model(num_classes):
    """Return a COCO-pretrained Faster R-CNN whose box head is resized to num_classes."""
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT")
    feature_dim = model.roi_heads.box_predictor.cls_score.in_features
    # Replace the classification head so it predicts our fruit classes.
    model.roi_heads.box_predictor = FastRCNNPredictor(feature_dim, num_classes)
    return model
def main():
    """Fine-tune Faster R-CNN on the Omniverse fruit dataset and save the model."""
    writer = SummaryWriter()
    options, args = parse_input()
    dataset = FruitDataset(options.data_dir, get_transform(train=True))

    # 70/20/10 train/validation/test split.
    train_size = int(len(dataset) * 0.7)
    valid_size = int(len(dataset) * 0.2)
    test_size = len(dataset) - valid_size - train_size
    train, valid, test = torch.utils.data.random_split(
        dataset, [train_size, valid_size, test_size]
    )

    # BUG FIX: the original built this loader over the *full* dataset, leaking
    # the validation/test splits into training; iterate the train split only.
    data_loader = torch.utils.data.DataLoader(
        train, batch_size=16, shuffle=True, num_workers=4, collate_fn=collate_fn
    )
    validloader = torch.utils.data.DataLoader(
        valid, batch_size=16, shuffle=True, num_workers=4, collate_fn=collate_fn
    )

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    num_classes = 10
    num_epochs = int(options.epochs)
    model = create_model(num_classes)
    model.to(device)

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.001)

    len_dataloader = len(data_loader)
    model.train()
    for epoch in range(num_epochs):
        i = 0
        for imgs, annotations in data_loader:
            i += 1
            imgs = list(img.to(device) for img in imgs)
            annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations]
            # BUG FIX: gradients must be cleared every iteration; the original
            # called zero_grad() once per epoch, so gradients accumulated
            # across all batches in an epoch.
            optimizer.zero_grad()
            loss_dict = model(imgs, annotations)
            losses = sum(loss for loss in loss_dict.values())
            writer.add_scalar("Loss/train", losses, epoch)
            losses.backward()
            optimizer.step()
            print(f"Iteration: {i}/{len_dataloader}, Loss: {losses}")
    writer.close()
    torch.save(model, options.output_file)


if __name__ == "__main__":
    main()
| 7,902 | Python | 32.487288 | 111 | 0.621362 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/data_generation/code/generate_data_gui.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
now = datetime.datetime.now()
from functools import partial
import omni.replicator.core as rep
# Build the fruit-crate SDG scene on a fresh Replicator layer: static env +
# crate, randomized fruit props and lights per frame, BasicWriter output.
with rep.new_layer():
    # Define paths for the character, the props, the environment and the surface where the assets will be scattered in.
    CRATE = "omniverse://localhost/NVIDIA/Samples/Marbles/assets/standalone/SM_room_crate_3/SM_room_crate_3.usd"
    SURFACE = (
        "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Basic/display_riser.usd"
    )
    ENVS = "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Interior/ZetCG_ExhibitionHall.usd"
    # Fruit class name -> USD asset path (class name becomes the semantic label).
    FRUIT_PROPS = {
        "apple": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Apple.usd",
        "avocado": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Avocado01.usd",
        "kiwi": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Kiwi01.usd",
        "lime": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Lime01.usd",
        "lychee": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Lychee01.usd",
        "pomegranate": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Pomegranate01.usd",
        "onion": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Vegetables/RedOnion.usd",
        "strawberry": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Berries/strawberry.usd",
        "lemon": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Decor/Tchotchkes/Lemon_01.usd",
        "orange": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Decor/Tchotchkes/Orange_01.usd",
    }
    # Define randomizer function for Base assets. This randomization includes placement and rotation of the assets on the surface.
    def random_props(file_name, class_name, max_number=1, one_in_n_chance=3):
        instances = rep.randomizer.instantiate(
            file_name, size=max_number, mode="scene_instance"
        )
        print(file_name)
        with instances:
            rep.modify.semantics([("class", class_name)])
            rep.modify.pose(
                position=rep.distribution.uniform((-8, 5, -25), (8, 30, 25)),
                rotation=rep.distribution.uniform((-180, -180, -180), (180, 180, 180)),
                scale=rep.distribution.uniform((0.8), (1.2)),
            )
            # Instance is visible roughly 1 time in (one_in_n_chance + 1).
            rep.modify.visibility(
                rep.distribution.choice([True], [False] * (one_in_n_chance))
            )
        return instances.node
    # Define randomizer function for sphere lights.
    def sphere_lights(num):
        lights = rep.create.light(
            light_type="Sphere",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(30000, 5000),
            position=rep.distribution.uniform((-300, -300, -300), (300, 300, 300)),
            scale=rep.distribution.uniform(50, 100),
            count=num,
        )
        return lights.node
    rep.randomizer.register(random_props)
    # Setup the static elements
    env = rep.create.from_usd(ENVS)
    surface = rep.create.from_usd(SURFACE)
    with surface:
        rep.physics.collider()
    crate = rep.create.from_usd(CRATE)
    with crate:
        # Heavy, non-colliding crate resting on the riser.
        rep.physics.collider("none")
        rep.physics.mass(mass=10000)
        rep.modify.pose(position=(0, 20, 0), rotation=(0, 0, 90))
    # Setup camera and attach it to render product
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))
    rep.randomizer.register(sphere_lights)
    # trigger on frame for an interval
    with rep.trigger.on_frame(num_frames=100):
        # Re-scatter every fruit type and relight the scene on each frame.
        for n, f in FRUIT_PROPS.items():
            random_props(f, n)
        rep.randomizer.sphere_lights(5)
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-3, 114, -17), (-1, 116, -15)),
                look_at=(0, 20, 0),
            )
    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    # Date-stamped output folder, e.g. fruit_data_2023-01-31.
    now = now.strftime("%Y-%m-%d")
    output_dir = "fruit_data_" + now
    writer.initialize(output_dir=output_dir, rgb=True, bounding_box_2d_tight=True)
    writer.attach([render_product])
| 5,923 | Python | 47.162601 | 130 | 0.690866 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/data_generation/code/generate_data_headless.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
now = datetime.datetime.now()
from functools import partial
import omni.replicator.core as rep
with rep.new_layer():
    # Define paths for the character, the props, the environment and the surface where the assets will be scattered in.
    CRATE = "omniverse://localhost/NVIDIA/Samples/Marbles/assets/standalone/SM_room_crate_3/SM_room_crate_3.usd"
    SURFACE = (
        "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Basic/display_riser.usd"
    )
    ENVS = "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Interior/ZetCG_ExhibitionHall.usd"
    # Nucleus paths of the fruit props to scatter. Each dict key doubles as the
    # semantic "class" label attached to the instances below.
    FRUIT_PROPS = {
        "apple": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Apple.usd",
        "avocado": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Avocado01.usd",
        "kiwi": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Kiwi01.usd",
        "lime": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Lime01.usd",
        "lychee": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Lychee01.usd",
        "pomegranate": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Pomegranate01.usd",
        "onion": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Vegetables/RedOnion.usd",
        "strawberry": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Berries/strawberry.usd",
        "lemon": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Decor/Tchotchkes/Lemon_01.usd",
        "orange": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Decor/Tchotchkes/Orange_01.usd",
    }

    # Define randomizer function for Base assets. This randomization includes placement and rotation of the assets on the surface.
    def random_props(file_name, class_name, max_number=1, one_in_n_chance=3):
        # Instantiate up to `max_number` scene instances of the given USD asset.
        instances = rep.randomizer.instantiate(
            file_name, size=max_number, mode="scene_instance"
        )
        print(file_name)
        with instances:
            # Tag instances with a semantic class so BasicWriter can label them.
            rep.modify.semantics([("class", class_name)])
            # Random position above the crate, free rotation, slight scale jitter.
            rep.modify.pose(
                position=rep.distribution.uniform((-8, 5, -25), (8, 30, 25)),
                rotation=rep.distribution.uniform((-180, -180, -180), (180, 180, 180)),
                scale=rep.distribution.uniform((0.8), (1.2)),
            )
            # NOTE(review): presumably hides each instance with probability
            # one_in_n_chance / (one_in_n_chance + 1) — confirm against the
            # rep.distribution.choice(choices, weights) signature.
            rep.modify.visibility(
                rep.distribution.choice([True], [False] * (one_in_n_chance))
            )
        return instances.node

    # Define randomizer function for sphere lights.
    def sphere_lights(num):
        # Create `num` sphere lights with randomized color temperature,
        # intensity, position and radius.
        lights = rep.create.light(
            light_type="Sphere",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(30000, 5000),
            position=rep.distribution.uniform((-300, -300, -300), (300, 300, 300)),
            scale=rep.distribution.uniform(50, 100),
            count=num,
        )
        return lights.node

    rep.randomizer.register(random_props)

    # Setup the static elements
    env = rep.create.from_usd(ENVS)
    surface = rep.create.from_usd(SURFACE)
    with surface:
        rep.physics.collider()
    crate = rep.create.from_usd(CRATE)
    with crate:
        rep.physics.collider("none")
        rep.physics.mass(mass=10000)
        rep.modify.pose(position=(0, 20, 0), rotation=(0, 0, 90))

    # Setup camera and attach it to render product
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))

    rep.randomizer.register(sphere_lights)

    # trigger on frame for an interval
    with rep.trigger.on_frame(num_frames=100):
        # Re-scatter every fruit type and relight the scene on each frame.
        for n, f in FRUIT_PROPS.items():
            random_props(f, n)
        rep.randomizer.sphere_lights(5)
        # Jitter the camera position each frame while keeping it aimed at the crate.
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-3, 114, -17), (-1, 116, -15)),
                look_at=(0, 20, 0),
            )

    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    # `now` was captured at import time (top of file); the date stamp names the
    # output directory, e.g. fruit_data_2023-01-31.
    now = now.strftime("%Y-%m-%d")
    output_dir = "fruit_data_" + now
    writer.initialize(output_dir=output_dir, rgb=True, bounding_box_2d_tight=True)
    writer.attach([render_product])

    # Run Replicator script headlessly
    rep.orchestrator.run()
| 5,990 | Python | 46.547619 | 130 | 0.690985 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/deployment/code/deploy.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import tritonclient.grpc as grpcclient
from optparse import OptionParser
# load image data
import cv2
import numpy as np
from matplotlib import pyplot as plt
import subprocess
def install(name):
    """Install a Python package into the current interpreter's environment.

    Invokes pip as ``sys.executable -m pip`` instead of a bare ``pip`` so the
    package is installed for the interpreter actually running this script
    (a bare ``pip`` on PATH may belong to a different Python).

    Args:
        name: Package name (optionally with a version specifier) to install.
    """
    import sys  # local import keeps the module's top-level imports unchanged

    subprocess.call([sys.executable, "-m", "pip", "install", name])
"""
Parses command line options. Requires input sample png
"""
def parse_input():
    """Parse command-line options for the deployment script.

    Returns:
        (options, args): the optparse options object, where ``options.png``
        holds the sample image path given via ``-p``/``--png``, plus any
        positional arguments.
    """
    cli = OptionParser("usage: deploy.py [options] arg1 ")
    cli.add_option(
        "-p",
        "--png",
        dest="png",
        help="Directory location for single sample image.",
    )
    return cli.parse_args()
def main():
    """Run a single-image inference round-trip against a Triton server.

    Loads the image given via -p/--png, resizes and normalizes it, sends it
    to the ``fasterrcnn_resnet50`` model over gRPC, then draws the returned
    bounding boxes with matplotlib/OpenCV.
    """
    options, args = parse_input()
    target_width, target_height = 1024, 1024
    # add path to test image
    image_sample = options.png
    image_bgr = cv2.imread(image_sample)
    image_bgr  # no-op expression — leftover from a notebook cell; has no effect
    image_bgr = cv2.resize(image_bgr, (target_width, target_height))
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    image = np.float32(image_rgb)
    # preprocessing: scale pixel values to [0, 1]
    image = image / 255
    image = np.moveaxis(image, -1, 0)  # HWC to CHW
    image = image[np.newaxis, :]  # add batch dimension
    image = np.float32(image)
    plt.imshow(image_rgb)
    # NOTE(review): assumes the Triton gRPC endpoint is reachable here — this
    # must match the port mapping of the tritonserver docker run command.
    inference_server_url = "0.0.0.0:9001"
    triton_client = grpcclient.InferenceServerClient(url=inference_server_url)
    # find out info about model
    model_name = "fasterrcnn_resnet50"
    triton_client.get_model_config(model_name)
    # create input
    input_name = "input"
    inputs = [grpcclient.InferInput(input_name, image.shape, "FP32")]
    inputs[0].set_data_from_numpy(image)
    output_name = "output"
    outputs = [grpcclient.InferRequestedOutput("output")]
    results = triton_client.infer(model_name, inputs, outputs=outputs)
    output = results.as_numpy("output")
    # annotate
    annotated_image = image_bgr.copy()
    if output.size > 0:  # ensure something is found
        for box in output:
            # presumably each row is (x1, y1, x2, y2) in pixels — TODO confirm
            # against the model config fetched above.
            box_top_left = int(box[0]), int(box[1])
            box_bottom_right = int(box[2]), int(box[3])
            text_origin = int(box[0]), int(box[3])
            border_color = (50, 0, 100)
            text_color = (255, 255, 255)
            font_scale = 0.9
            thickness = 1
            # bounding box
            cv2.rectangle(
                annotated_image,
                box_top_left,
                box_bottom_right,
                border_color,
                thickness=5,
                lineType=cv2.LINE_8,
            )
    plt.imshow(cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB))


if __name__ == "__main__":
    main()
| 4,261 | Python | 31.287879 | 85 | 0.680357 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docs/part_2.md | # Part 2: Training a model with synthetic data
## Setup training
To use the training script you can see required parameters by running
-`python train.py --help`
- Example command:
- `python train.py -d /home/omni.replicator_out/fruit_data_$DATE/ -o /home/model.pth -e 10`
## Visualize training
We have included a visualization script to run after your first training. This will show how Omniverse generates the labeled data. To see required parameters
- `python visualize.py --help`
- Example command:
- `python visualize.py -d /home/$USER/omni.replicator_out/fruit_data_$DATE -o /home/$USER -n 0`
## Export model
- To use the export script you can see required parameters by running
- `python export.py --help`
- Example command, make sure to save to the `models/fasterrcnn_resnet50/1` directory
- `python export.py -d /home/out.pth -o /home/models/fasterrcnn_resnet50/1`
| 874 | Markdown | 38.772726 | 158 | 0.744851 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docs/part_3.md | # Part 3: Deploy model to Triton
## Start triton server
When we start the server we want our model to be properly located in the `/models/fasterrcnn_resnet50/1` folder.
`sudo docker run --gpus=1 --rm -p9000:8000 -p9001:8001 -p9002:8002 -v /home/$USER/sdg_workflow/models/:/models nvcr.io/nvidia/tritonserver:23.01-py3 tritonserver --model-repository=/models`
Once started, you should see:
```
+---------------------+---------+--------+
| Model | Version | Status |
+---------------------+---------+--------+
| fasterrcnn_resnet50 | 1 | READY |
+---------------------+---------+--------+
```
## Start triton client
In another terminal window, with your server running start your client
- `sudo docker run -it --rm --net=host -v /home/$USER/sdg_workflow:/workspace nvcr.io/nvidia/tritonserver:23.01-py3-sdk`
- To use the deploy script you can see required parameters by running
- `python deploy.py --help`
- Example command:
- ` python deploy.py -p /workspace/rgb_0.png`
| 1,006 | Markdown | 34.964284 | 189 | 0.622266 |
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docs/part_1.md | # Part 1: Generate synthetic data with Omnvierse
## Install Dependencies
In this section you can generate your synthetic data using the Omniverse GUI or as a headless version in your local terminal. Either option requires an Omniverse install.
- [Install Omniverse Launcher](https://docs.omniverse.nvidia.com/prod_install-guide/prod_install-guide/overview.html#omniverse-install-guide)
## Omniverse Launcher & Code
- [Install Omniverse Code](https://docs.omniverse.nvidia.com/prod_workflows/prod_workflows/extensions/environment_configuration.html#step-2-install-omniverse-code) from the `Exchange` tab within Omniverse Launcher
## Generate data in Omniverse GUI
Copy the contents of the generate_data.py script into the Script Editor tab in the bottom section of the Code window. Press the Run button or Ctrl + Enter on your keyboard to load the scene in the Viewport. From there you can preview a single scene via the Replicator tab at the top by clicking Preview, or run the full script by clicking Run. If you make no changes to this script it will generate 100 frames.
- From inside the Code GUI using the [script editor](https://docs.omniverse.nvidia.com/app_code/prod_extensions/ext_script-editor.html)
- If using Linux, copy code from `generate_data_gui.py` into the Script Editor window
- Execute code by clicking the `Run` button or pressing `Ctrl+Enter`
- To preview what the scene will look like click Replicator then `Preview` in the top bar of your Omniverse Code window
- When you are ready to generate all your data go ahead and click `Replicator` and then `Run`, this will generate the designated number of frames and drop the RGB, bounding box data, and labels into the desired folder
## Generate data headlessly
Follow the documentation guidelines to launch a terminal in the correct folder location. The correct script to pass to your --/omni/replicator.scrip is generate_data_headless.py. This will generate and save the synthetic data in the same way as before, without utilizing the Omniverse GUI.
- [How to run](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/headless_example.html)
- Script location: `/FruitBasketOVEReplicatorDemo/data_generation/code/generate_data_headless.py`
- We need to locate `omni.code.replicator.sh`
To find it, look for where Omniverse Code is locally installed
- Run (script dictates where the output data is stored):
`./omni.code.replicator.sh --no-window --/omni/replicator/script= “/FruitBasketOVEReplicatorDemo/data_generation/code/generate_data_headless.py”`
| 2,562 | Markdown | 72.228569 | 411 | 0.79313 |
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/predict.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import utils
import cv2
import torch
if __name__ == "__main__":
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "engine",
        type=str,
        help="The file path of the TensorRT engine."
    )
    parser.add_argument(
        "image",
        type=str,
        help="The file path of the image provided as input for inference."
    )
    parser.add_argument(
        "--output",
        type=str,
        default=None,
        help="The path to output the inference visualization."
    )
    parser.add_argument(
        "--inference-size",
        type=str,
        default="512x512",
        help="The height and width that the image is resized to for inference."
        " Denoted as (height)x(width)."
    )
    parser.add_argument(
        "--peak-window",
        type=str,
        default="7x7",
        help="The size of the window used when finding local peaks. Denoted as "
        " (window_height)x(window_width)."
    )
    parser.add_argument(
        '--peak-threshold',
        type=float,
        default=0.5,
        help="The heatmap threshold to use when finding peaks. Values must be "
        " larger than this value to be considered peaks."
    )
    parser.add_argument(
        '--line-thickness',
        type=int,
        default=1,
        help="The line thickness for drawn boxes"
    )
    args = parser.parse_args()

    # Parse inference height, width from arguments ("HxW" -> (H, W) ints)
    inference_size = tuple(int(x) for x in args.inference_size.split('x'))
    peak_window = tuple(int(x) for x in args.peak_window.split('x'))

    # Default output path: "<image basename>_output.jpg" next to the input.
    if args.output is None:
        output_path = '.'.join(args.image.split('.')[:-1]) + "_output.jpg"
    else:
        output_path = args.output

    # Create offset grid (per-cell centers used to decode the vectormap).
    # NOTE: inference requires a CUDA device — tensors and engine run on "cuda".
    offset_grid = utils.make_offset_grid(inference_size).to("cuda")

    # Load model
    model = utils.load_trt_engine_wrapper(
        args.engine,
        input_names=["input"],
        output_names=["heatmap", "vectormap"]
    )

    # Load image
    image = cv2.imread(args.image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Pad and resize image (aspect ratio preserving resize)
    image, _, _ = utils.pad_resize(image, inference_size)

    with torch.no_grad():
        # Format image for inference (HWC uint8 -> normalized NCHW float)
        x = utils.format_bgr8_image(image)
        x = x.to("cuda")

        # Execute model
        heatmap, vectormap = model(x)

        # Scale and offset vectormap into absolute keypoint coordinates
        keypointmap = utils.vectormap_to_keypointmap(
            offset_grid,
            vectormap
        )

        # Find local peaks in the heatmap (one peak per detected box face)
        peak_mask = utils.find_heatmap_peak_mask(
            heatmap,
            peak_window,
            args.peak_threshold
        )

        # Extract keypoints at local peak
        keypoints = keypointmap[0][peak_mask[0, 0]]

        # Draw the detected box edges onto the (resized) input image
        vis_image = utils.draw_box(
            image,
            keypoints,
            color=(118, 186, 0),
            thickness=args.line_thickness
        )

        vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(output_path, vis_image)
| 3,833 | Python | 26.191489 | 98 | 0.610749 |
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/utils.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import numpy as np
import cv2
import einops
import tensorrt as trt
import torch2trt
from typing import Sequence
# Pairs of corner indices forming the 12 edges of a 3D box; consumed by
# draw_box below, which indexes into the 8 per-object keypoints with them.
BOX_EDGES = [
    [0, 1],
    [1, 5],
    [5, 4],
    [4, 0],
    [2, 3],
    [3, 7],
    [7, 6],
    [6, 2],
    [0, 2],
    [1, 3],
    [4, 6],
    [5, 7]
]
def make_offset_grid(
    size,
    stride=(1, 1)
):
    """Build a grid of per-cell center coordinates.

    Args:
        size: (height, width) of the grid.
        stride: (row_stride, col_stride) scale applied per axis.

    Returns:
        Tensor of shape (size[0], size[1], 2) where grid[i, j] holds the
        (y, x) center of cell (i, j), i.e. stride * (index + 0.5).
    """
    grid = torch.stack(
        torch.meshgrid(
            stride[0] * (torch.arange(size[0]) + 0.5),
            stride[1] * (torch.arange(size[1]) + 0.5),
            # Explicit "ij" matches the historical default and silences the
            # torch.meshgrid deprecation warning for the implicit default.
            indexing="ij",
        ),
        dim=-1
    )
    return grid
def vectormap_to_keypointmap(
    offset_grid,
    vector_map,
    vector_scale: float = 1./256.
):
    """Decode a regressed vectormap into absolute keypoint coordinates.

    Args:
        offset_grid: (H, W, 2) tensor of per-cell (y, x) centers, as produced
            by make_offset_grid.
        vector_map: (B, K*2, H, W) tensor of scaled per-cell (y, x) offsets
            for K keypoints.
        vector_scale: scale the network applied to its outputs; offsets are
            divided by it to recover pixel units.

    Returns:
        (B, H, W, K, 2) tensor of absolute keypoint positions in (x, y) order.
    """
    batch, channels, height, width = vector_map.shape
    num_keypoints = channels // 2
    # Undo the network's output scaling.
    vectors = vector_map / vector_scale
    # (B, K*2, H, W) -> (B, K, 2, H, W) -> (B, H, W, K, 2).
    # Equivalent to einops.rearrange("b (k d) h w -> b h w k d", d=2), done
    # with native torch ops to drop the einops dependency.
    keypoint_map = vectors.reshape(batch, num_keypoints, 2, height, width)
    keypoint_map = keypoint_map.permute(0, 3, 4, 1, 2)
    # Add each cell's center so the offsets become absolute (y, x) positions.
    keypoint_map = keypoint_map + offset_grid[:, :, None, :]

    # yx -> xy
    keypoint_map = keypoint_map[..., [1, 0]]

    return keypoint_map
def find_heatmap_peak_mask(heatmap, window=3, threshold=0.5):
    """Return a boolean mask marking local maxima of ``heatmap``.

    An element is a peak when it is the maximum within its local window AND
    its value is at least ``threshold``.

    Args:
        heatmap: (B, C, H, W) tensor.
        window: int or (height, width) of the local-max window.
        threshold: minimum value for a location to count as a peak.

    Returns:
        Boolean tensor with the same shape as ``heatmap``.
    """
    if isinstance(window, int):
        window = (window, window)

    # Flat index of every element, laid out in the heatmap's own shape.
    flat_ids = torch.arange(
        heatmap.numel(),
        device=heatmap.device
    ).reshape(heatmap.shape)

    # Max-pool with 'same'-style padding; the returned indices identify, for
    # each position, which element won within its local window.
    _, winner_ids = F.max_pool2d_with_indices(
        heatmap,
        kernel_size=window,
        stride=1,
        padding=(window[0] // 2, window[1] // 2)
    )

    # A peak is an element that wins its own window and clears the threshold.
    return (winner_ids == flat_ids) & (heatmap >= threshold)
def draw_box(image_bgr, keypoints, color=(118, 186, 0), thickness=1):
    """Draw the 12 edges of each detected 3D box onto an image (in place).

    Args:
        image_bgr: image array to draw on; modified in place.
        keypoints: (N, 8, 2) per-object corner coordinates (x, y).
        color: BGR line color. Bug fix: this parameter was previously
            accepted but ignored in favor of a hard-coded (118, 186, 0).
        thickness: line thickness in pixels.

    Returns:
        The same image array with the boxes drawn.
    """
    num_objects = int(keypoints.shape[0])
    for i in range(num_objects):
        # Integer pixel coordinates of the 8 box corners for object i.
        corners = [(int(x), int(y)) for x, y in keypoints[i]]
        for edge in BOX_EDGES:
            cv2.line(
                image_bgr,
                corners[edge[0]],
                corners[edge[1]],
                color,  # honor the caller-supplied color
                thickness=thickness
            )
    return image_bgr
def pad_resize(image, output_shape):
    """Aspect-ratio-preserving resize of ``image`` onto a zero-padded canvas.

    The image is scaled to fit inside ``output_shape`` without distortion and
    centered on a black canvas of exactly that shape.

    Args:
        image: (H, W, C) array.
        output_shape: (height, width) of the returned canvas.

    Returns:
        (out, pad, scale): the padded canvas, (pad_top, pad_left) offsets of
        the pasted image, and (scale_y, scale_x) factors mapping canvas-space
        coordinates back to the original image.
    """
    ar_i = image.shape[1] / image.shape[0]    # input aspect ratio (w / h)
    ar_o = output_shape[1] / output_shape[0]  # output aspect ratio

    # resize: fill the limiting dimension, shrink the other to keep aspect
    if ar_i > ar_o:
        w_i = output_shape[1]
        h_i = min(int(w_i / ar_i), output_shape[0])
    else:
        h_i = output_shape[0]
        w_i = min(int(h_i * ar_i), output_shape[1])

    # paste: center the resized image on a zero (black) canvas
    pad_left = (output_shape[1] - w_i) // 2
    pad_top = (output_shape[0] - h_i) // 2

    image_resize = cv2.resize(image, (w_i, h_i))

    out = np.zeros_like(
        image,
        shape=(output_shape[0], output_shape[1], image.shape[2])
    )

    out[pad_top:pad_top + h_i, pad_left:pad_left + w_i] = image_resize

    pad = (pad_top, pad_left)
    scale = (image.shape[0] / h_i, image.shape[1] / w_i)

    return out, pad, scale
def load_trt_engine(path: str):
    """Deserialize a TensorRT engine from a file.

    Args:
        path: file path of the serialized engine (as produced by trtexec).

    Returns:
        The deserialized CUDA engine object.
    """
    with trt.Logger() as logger, trt.Runtime(logger) as runtime:
        with open(path, 'rb') as f:
            engine_bytes = f.read()
        engine = runtime.deserialize_cuda_engine(engine_bytes)
    return engine
def load_trt_engine_wrapper(
    path: str,
    input_names: Sequence,
    output_names: Sequence
):
    """Load a TensorRT engine and wrap it in a torch-callable module.

    Args:
        path: file path of the serialized engine.
        input_names: engine input binding names, in call order.
        output_names: engine output binding names, in return order.

    Returns:
        A torch2trt.TRTModule that can be invoked like a torch model.
    """
    engine = load_trt_engine(path)
    wrapper = torch2trt.TRTModule(
        engine=engine,
        input_names=input_names,
        output_names=output_names
    )
    return wrapper
def format_bgr8_image(image, device="cuda"):
    """Convert an HWC uint8 image to a normalized NCHW float tensor.

    Args:
        image: (H, W, 3) uint8 numpy array.
        device: torch device to place the result on. Bug fix: this parameter
            was previously accepted but never used; it is now honored.

    Returns:
        (1, 3, H, W) float tensor on ``device``, normalized as
        (x / 255 - 0.45) / 0.25 per pixel.
    """
    x = torch.from_numpy(image)
    x = x.permute(2, 0, 1)[None, ...]
    x = (x / 255 - 0.45) / 0.25
    return x.to(device)
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/README.md | # SDG Pallet Model
<img src="images/test_image_1_output.jpg" height="256"/>
This repository contains code for performing optimized TensorRT inference with a pre-trained
pallet detection model that was trained using synthetic data with [NVIDIA Omniverse Replicator](https://developer.nvidia.com/omniverse/replicator).
The model takes as input a monocular RGB image, and outputs the pallet box estimates. The box estimates
are defined for each pallet side face. So a single pallet may have multiple box
estimates.
If you have any questions, please feel free to reach out by opening an issue!
## Instructions
### Step 1 - Install dependencies
Assumes you've already set up your system with OpenCV, PyTorch and numpy.
Install einops for some utility functions.
```bash
pip3 install einops
```
Install [torch2trt](https://github.com/NVIDIA-AI-IOT/torch2trt). This is used
for the ``TRTModule`` class which simplifies engine inference.
```bash
git clone https://github.com/NVIDIA-AI-IOT/torch2trt
cd torch2trt
python3 setup.py develop
```
### Step 2 - Download the ONNX model
Download the pallet model ONNX file.
| Model | Notes | Links |
|-------|-------|-------|
| pallet_model_v1_all | Trained for wood and other pallets (metal, plastic). | [onnx](https://drive.google.com/file/d/1Vsl7s5YhBFxkTkd3UYYgPWFCLNRm_O_Q/view?usp=share_link) |
| pallet_model_v1_wood | Trained only for wood pallets. | [onnx](https://drive.google.com/file/d/1Fd1gS7NYkWHPhUn7iZLK43hLQ1qDkuvb/view?usp=share_link) |
### Step 3 - Build the TensorRT engine
#### Option 1 (*recommended*) - Build the FP16 engine
To build the FP16 engine, call the following:
```bash
./build_trt_fp16.sh <onnx_path> <engine_output_path>
```
#### Option 2 - Build the INT8 engine
> The INT8 model instructions do not yet include calibration. Please only use
> this model for throughput profiling. The accuracy is likely to vary from
> FP32/FP16 models. However, once calibration is included, this may become
> the recommended option given the improved throughput results.
To build the INT8 engine, call the following:
```bash
./build_trt_int8.sh <onnx_path> <engine_output_path>
```
We hope to provide instructions for using the Deep Learning Accelerator (DLA)
on Jetson AGX Orin, and INT8 calibration soon.
### Step 3 - Profile the engine
To profile the engine with the ``trtexec`` tool, call the following:
```bash
./profile_engine.sh <engine_path>
```
Here are the results for a model inference at 256x256 resolution,
profiled on Jetson AGX Orin.
<a id="throughput_results"/>
| Precision | Throughput (FPS) |
|-----------|------------------|
| FP16 | 465 |
| INT8 | 710 |
Notes:
- Called ``jetson_clocks`` before running
- Using MAXN power mode by calling ``sudo nvpmodel -m0``
- Batch size 1
- ``--useSpinWait`` flag enabled to stabilize timings
- ``--useCudaGraph`` flag enabled to use CUDA graph optimizations. Cuda graph
isn't yet used in the predict function.
### Step 4 - Run inference on an example image.
```bash
python3 predict.py <engine_path> <image_path> --output=<output_path>
```
For more options
```
python3 predict.py --help
```
### Next Steps
Try modifying the predict.py code to visualize inference on a live camera feed.
| 3,292 | Markdown | 27.634782 | 174 | 0.717801 |
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/LICENSE.md | SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 654 | Markdown | 42.666664 | 96 | 0.801223 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/README.md | # Isaac Sim ROS & ROS2 Workspaces
This repository contains three workspaces: `noetic_ws` (ROS Noetic), `foxy_ws` (ROS2 Foxy) and `humble_ws` (ROS2 Humble).
[Click here for usage and installation instructions with Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_ros.html)
When cloning this repository, all three workspaces are downloaded. Depending on which ROS distro you are using, follow the [setup instructions](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_ros.html#setting-up-workspaces) for building your specific workspace. | 593 | Markdown | 83.857131 | 284 | 0.797639 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/isaac_tutorials/scripts/ros2_publisher.py | #!/usr/bin/env python3
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import JointState
import numpy as np
import time
class TestROS2Bridge(Node):
    """ROS2 node that publishes sinusoidal joint position commands.

    Publishes a JointState message on /joint_command at 20 Hz, wiggling all
    nine Franka Panda joints around a default pose for the Isaac Sim ROS2
    bridge tutorial.
    """

    def __init__(self):
        super().__init__("test_ros2bridge")

        # Create the publisher. This publisher will publish a JointState message to the /joint_command topic.
        self.publisher_ = self.create_publisher(JointState, "joint_command", 10)

        # Create a JointState message
        self.joint_state = JointState()

        # Joint names of the Franka Panda arm plus its two gripper fingers.
        self.joint_state.name = [
            "panda_joint1",
            "panda_joint2",
            "panda_joint3",
            "panda_joint4",
            "panda_joint5",
            "panda_joint6",
            "panda_joint7",
            "panda_finger_joint1",
            "panda_finger_joint2",
        ]

        num_joints = len(self.joint_state.name)

        # make sure kit's editor is playing for receiving messages
        self.joint_state.position = np.array([0.0] * num_joints, dtype=np.float64).tolist()
        self.default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4]

        # limiting the movements to a smaller range (this is not the range of the robot, just the range of the movement)
        self.max_joints = np.array(self.default_joints) + 0.5
        self.min_joints = np.array(self.default_joints) - 0.5

        # position control the robot to wiggle around each joint
        self.time_start = time.time()

        timer_period = 0.05  # seconds (20 Hz publish rate)
        self.timer = self.create_timer(timer_period, self.timer_callback)

    def timer_callback(self):
        """Publish the next sinusoidal joint-position sample."""
        self.joint_state.header.stamp = self.get_clock().now().to_msg()

        # Sine wave over elapsed time, scaled to half the allowed range and
        # centered on the default pose.
        joint_position = (
            np.sin(time.time() - self.time_start) * (self.max_joints - self.min_joints) * 0.5 + self.default_joints
        )
        self.joint_state.position = joint_position.tolist()

        # Publish the message to the topic
        self.publisher_.publish(self.joint_state)
def main(args=None):
    """Initialize rclpy, spin the publisher node until shutdown.

    Args:
        args: optional command-line args forwarded to rclpy.init.
    """
    rclpy.init(args=args)

    ros2_publisher = TestROS2Bridge()

    # Blocks until the node is shut down (e.g. Ctrl-C).
    rclpy.spin(ros2_publisher)

    # Destroy the node explicitly
    ros2_publisher.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
| 2,662 | Python | 31.084337 | 119 | 0.644252 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/launch/carter_navigation.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Launch Nav2 + RViz for a single Carter robot driven by Isaac Sim.

    Brings up the Nav2 stack with the warehouse map, opens RViz with the
    Carter config, and starts a pointcloud_to_laserscan node that converts
    the front 3D lidar point cloud into the 2D /scan topic Nav2 consumes.
    """
    # Isaac Sim publishes simulation time, so all nodes default to it.
    use_sim_time = LaunchConfiguration("use_sim_time", default="True")

    map_dir = LaunchConfiguration(
        "map",
        default=os.path.join(
            get_package_share_directory("carter_navigation"), "maps", "carter_warehouse_navigation.yaml"
        ),
    )

    param_dir = LaunchConfiguration(
        "params_file",
        default=os.path.join(
            get_package_share_directory("carter_navigation"), "params", "carter_navigation_params.yaml"
        ),
    )

    nav2_bringup_launch_dir = os.path.join(get_package_share_directory("nav2_bringup"), "launch")

    rviz_config_dir = os.path.join(get_package_share_directory("carter_navigation"), "rviz2", "carter_navigation.rviz")

    return LaunchDescription(
        [
            DeclareLaunchArgument("map", default_value=map_dir, description="Full path to map file to load"),
            DeclareLaunchArgument(
                "params_file", default_value=param_dir, description="Full path to param file to load"
            ),
            DeclareLaunchArgument(
                "use_sim_time", default_value="true", description="Use simulation (Omniverse Isaac Sim) clock if true"
            ),
            # RViz with the Carter navigation layout.
            IncludeLaunchDescription(
                PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")),
                launch_arguments={"namespace": "", "use_namespace": "False", "rviz_config": rviz_config_dir}.items(),
            ),
            # Full Nav2 bringup (map server, AMCL, planner, controller, ...).
            IncludeLaunchDescription(
                PythonLaunchDescriptionSource([nav2_bringup_launch_dir, "/bringup_launch.py"]),
                launch_arguments={"map": map_dir, "use_sim_time": use_sim_time, "params_file": param_dir}.items(),
            ),
            # Flatten the 3D lidar point cloud into a planar laser scan.
            Node(
                package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node',
                remappings=[('cloud_in', ['/front_3d_lidar/point_cloud']),
                            ('scan', ['/scan'])],
                parameters=[{
                    'target_frame': 'front_3d_lidar',
                    'transform_tolerance': 0.01,
                    'min_height': -0.4,
                    'max_height': 1.5,
                    'angle_min': -1.5708,  # -M_PI/2
                    'angle_max': 1.5708,  # M_PI/2
                    'angle_increment': 0.0087,  # M_PI/360.0
                    'scan_time': 0.3333,
                    'range_min': 0.05,
                    'range_max': 100.0,
                    'use_inf': True,
                    'inf_epsilon': 1.0,
                    # 'concurrency_level': 1,
                }],
                name='pointcloud_to_laserscan'
            )
        ]
    )
| 3,521 | Python | 42.481481 | 119 | 0.601534 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/launch/carter_navigation_individual.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, IncludeLaunchDescription
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PythonExpression, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
    """Bring up Nav2 for a single robot via the stock nav2_bringup launch.

    Declares the standard Nav2 launch arguments (namespace, slam, map,
    params, behavior tree, autostart) and forwards them all to
    nav2_bringup/bringup_launch.py.
    """
    # Get the launch directory
    nav2_launch_dir = os.path.join(get_package_share_directory("nav2_bringup"), "launch")

    # Create the launch configuration variables
    slam = LaunchConfiguration("slam")
    namespace = LaunchConfiguration("namespace")
    use_namespace = LaunchConfiguration("use_namespace")
    map_yaml_file = LaunchConfiguration("map")
    use_sim_time = LaunchConfiguration("use_sim_time")
    params_file = LaunchConfiguration("params_file")
    default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
    autostart = LaunchConfiguration("autostart")

    # Declare the launch arguments
    declare_namespace_cmd = DeclareLaunchArgument("namespace", default_value="", description="Top-level namespace")

    declare_use_namespace_cmd = DeclareLaunchArgument(
        "use_namespace", default_value="false", description="Whether to apply a namespace to the navigation stack"
    )

    declare_slam_cmd = DeclareLaunchArgument("slam", default_value="False", description="Whether run a SLAM")

    declare_map_yaml_cmd = DeclareLaunchArgument(
        "map",
        default_value=os.path.join(nav2_launch_dir, "maps", "carter_warehouse_navigation.yaml"),
        description="Full path to map file to load",
    )

    declare_use_sim_time_cmd = DeclareLaunchArgument(
        "use_sim_time", default_value="True", description="Use simulation (Isaac Sim) clock if true"
    )

    declare_params_file_cmd = DeclareLaunchArgument(
        "params_file",
        default_value=os.path.join(nav2_launch_dir, "params", "nav2_params.yaml"),
        description="Full path to the ROS2 parameters file to use for all launched nodes",
    )

    declare_bt_xml_cmd = DeclareLaunchArgument(
        "default_bt_xml_filename",
        default_value=os.path.join(
            get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml"
        ),
        description="Full path to the behavior tree xml file to use",
    )

    declare_autostart_cmd = DeclareLaunchArgument(
        "autostart", default_value="true", description="Automatically startup the nav2 stack"
    )

    # Forward all declared arguments to the standard Nav2 bringup.
    bringup_cmd = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(os.path.join(nav2_launch_dir, "bringup_launch.py")),
        launch_arguments={
            "namespace": namespace,
            "use_namespace": use_namespace,
            "slam": slam,
            "map": map_yaml_file,
            "use_sim_time": use_sim_time,
            "params_file": params_file,
            "default_bt_xml_filename": default_bt_xml_filename,
            "autostart": autostart,
        }.items(),
    )

    # Create the launch description and populate
    ld = LaunchDescription()

    # Declare the launch options
    ld.add_action(declare_namespace_cmd)
    ld.add_action(declare_use_namespace_cmd)
    ld.add_action(declare_slam_cmd)
    ld.add_action(declare_map_yaml_cmd)
    ld.add_action(declare_use_sim_time_cmd)
    ld.add_action(declare_params_file_cmd)
    ld.add_action(declare_bt_xml_cmd)
    ld.add_action(declare_autostart_cmd)

    ld.add_action(bringup_cmd)

    return ld
| 4,076 | Python | 39.366336 | 120 | 0.711237 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/launch/multiple_robot_carter_navigation_hospital.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Example for spawing multiple robots in Gazebo.
This is an example on how to create a launch file for spawning multiple robots into Gazebo
and launch multiple instances of the navigation stack, each controlling one robot.
The robots co-exist on a shared environment and are controlled by independent nav stacks
"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, GroupAction, IncludeLaunchDescription, LogInfo
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
    """Bring up three namespaced Carter robots navigating the hospital map.

    For each robot this launches, under the robot's namespace:
      * an optional RViz instance using a namespaced config,
      * an individual Nav2 stack (carter_navigation_individual.launch.py),
      * a pointcloud_to_laserscan node that converts the 3D lidar cloud
        into a 2D LaserScan for the Nav2 costmaps,
      * LogInfo actions that echo the chosen settings.
    """
    # Get the launch and rviz directories
    carter_nav2_bringup_dir = get_package_share_directory("carter_navigation")
    nav2_bringup_dir = get_package_share_directory("nav2_bringup")
    nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, "launch")
    rviz_config_dir = os.path.join(carter_nav2_bringup_dir, "rviz2", "carter_navigation_namespaced.rviz")
    # Names and poses of the robots
    robots = [{"name": "carter1"}, {"name": "carter2"}, {"name": "carter3"}]
    # Common settings
    ENV_MAP_FILE = "carter_hospital_navigation.yaml"
    use_sim_time = LaunchConfiguration("use_sim_time", default="True")
    map_yaml_file = LaunchConfiguration("map")
    default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
    autostart = LaunchConfiguration("autostart")
    rviz_config_file = LaunchConfiguration("rviz_config")
    use_rviz = LaunchConfiguration("use_rviz")
    log_settings = LaunchConfiguration("log_settings", default="true")
    # Declare the launch arguments
    declare_map_yaml_cmd = DeclareLaunchArgument(
        "map",
        default_value=os.path.join(carter_nav2_bringup_dir, "maps", ENV_MAP_FILE),
        description="Full path to map file to load",
    )
    # Each robot gets its own parameter file (declared as "<name>_params_file")
    # so namespaced topics and frames can differ between robots.
    declare_robot1_params_file_cmd = DeclareLaunchArgument(
        "carter1_params_file",
        default_value=os.path.join(
            carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_1.yaml"
        ),
        description="Full path to the ROS2 parameters file to use for robot1 launched nodes",
    )
    declare_robot2_params_file_cmd = DeclareLaunchArgument(
        "carter2_params_file",
        default_value=os.path.join(
            carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_2.yaml"
        ),
        description="Full path to the ROS2 parameters file to use for robot2 launched nodes",
    )
    declare_robot3_params_file_cmd = DeclareLaunchArgument(
        "carter3_params_file",
        default_value=os.path.join(
            carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_3.yaml"
        ),
        description="Full path to the ROS2 parameters file to use for robot3 launched nodes",
    )
    declare_bt_xml_cmd = DeclareLaunchArgument(
        "default_bt_xml_filename",
        default_value=os.path.join(
            get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml"
        ),
        description="Full path to the behavior tree xml file to use",
    )
    declare_autostart_cmd = DeclareLaunchArgument(
        "autostart", default_value="True", description="Automatically startup the stacks"
    )
    declare_rviz_config_file_cmd = DeclareLaunchArgument(
        "rviz_config", default_value=rviz_config_dir, description="Full path to the RVIZ config file to use."
    )
    declare_use_rviz_cmd = DeclareLaunchArgument("use_rviz", default_value="True", description="Whether to start RVIZ")
    # Define commands for launching the navigation instances
    nav_instances_cmds = []
    for robot in robots:
        # Resolves the per-robot params file declared above (e.g. "carter1_params_file").
        params_file = LaunchConfiguration(robot["name"] + "_params_file")
        group = GroupAction(
            [
                IncludeLaunchDescription(
                    PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")),
                    condition=IfCondition(use_rviz),
                    launch_arguments={
                        "namespace": TextSubstitution(text=robot["name"]),
                        "use_namespace": "True",
                        "rviz_config": rviz_config_file,
                    }.items(),
                ),
                IncludeLaunchDescription(
                    PythonLaunchDescriptionSource(
                        os.path.join(carter_nav2_bringup_dir, "launch", "carter_navigation_individual.launch.py")
                    ),
                    launch_arguments={
                        "namespace": robot["name"],
                        "use_namespace": "True",
                        "map": map_yaml_file,
                        "use_sim_time": use_sim_time,
                        "params_file": params_file,
                        "default_bt_xml_filename": default_bt_xml_filename,
                        "autostart": autostart,
                        "use_rviz": "False",
                        "use_simulator": "False",
                        "headless": "False",
                    }.items(),
                ),
                # Converts the robot's 3D lidar point cloud into a 2D LaserScan.
                Node(
                    package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node',
                    remappings=[('cloud_in', ['front_3d_lidar/point_cloud']),
                                ('scan', ['scan'])],
                    parameters=[{
                        'target_frame': 'front_3d_lidar',
                        'transform_tolerance': 0.01,
                        'min_height': -0.4,
                        'max_height': 1.5,
                        'angle_min': -1.5708,  # -M_PI/2
                        'angle_max': 1.5708,  # M_PI/2
                        'angle_increment': 0.0087,  # M_PI/360.0
                        'scan_time': 0.3333,
                        'range_min': 0.05,
                        'range_max': 100.0,
                        'use_inf': True,
                        'inf_epsilon': 1.0,
                        # 'concurrency_level': 1,
                    }],
                    name='pointcloud_to_laserscan',
                    namespace = robot["name"]
                ),
                LogInfo(condition=IfCondition(log_settings), msg=["Launching ", robot["name"]]),
                LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " map yaml: ", map_yaml_file]),
                LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " params yaml: ", params_file]),
                LogInfo(
                    condition=IfCondition(log_settings),
                    msg=[robot["name"], " behavior tree xml: ", default_bt_xml_filename],
                ),
                LogInfo(
                    condition=IfCondition(log_settings), msg=[robot["name"], " rviz config file: ", rviz_config_file]
                ),
                LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " autostart: ", autostart]),
            ]
        )
        nav_instances_cmds.append(group)
    # Create the launch description and populate
    ld = LaunchDescription()
    # Declare the launch options
    ld.add_action(declare_map_yaml_cmd)
    ld.add_action(declare_robot1_params_file_cmd)
    ld.add_action(declare_robot2_params_file_cmd)
    ld.add_action(declare_robot3_params_file_cmd)
    ld.add_action(declare_bt_xml_cmd)
    ld.add_action(declare_use_rviz_cmd)
    ld.add_action(declare_autostart_cmd)
    ld.add_action(declare_rviz_config_file_cmd)
    for simulation_instance_cmd in nav_instances_cmds:
        ld.add_action(simulation_instance_cmd)
    return ld
| 8,338 | Python | 42.432291 | 120 | 0.601823 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/launch/multiple_robot_carter_navigation_office.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Example of spawning multiple robots in simulation.
This is an example of how to create a launch file for spawning multiple robots into the simulator
and launching multiple instances of the navigation stack, each controlling one robot.
The robots co-exist in a shared environment and are controlled by independent nav stacks.
"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, GroupAction, IncludeLaunchDescription, LogInfo
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
    """Bring up three namespaced Carter robots navigating the office map.

    For each robot this launches, under the robot's namespace:
      * an optional RViz instance using a namespaced config,
      * an individual Nav2 stack (carter_navigation_individual.launch.py),
      * a pointcloud_to_laserscan node that converts the 3D lidar cloud
        into a 2D LaserScan for the Nav2 costmaps,
      * LogInfo actions that echo the chosen settings.
    """
    # Get the launch and rviz directories
    carter_nav2_bringup_dir = get_package_share_directory("carter_navigation")
    nav2_bringup_dir = get_package_share_directory("nav2_bringup")
    nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, "launch")
    rviz_config_dir = os.path.join(carter_nav2_bringup_dir, "rviz2", "carter_navigation_namespaced.rviz")
    # Names and poses of the robots
    robots = [{"name": "carter1"}, {"name": "carter2"}, {"name": "carter3"}]
    # Common settings
    ENV_MAP_FILE = "carter_office_navigation.yaml"
    use_sim_time = LaunchConfiguration("use_sim_time", default="True")
    map_yaml_file = LaunchConfiguration("map")
    default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
    autostart = LaunchConfiguration("autostart")
    rviz_config_file = LaunchConfiguration("rviz_config")
    use_rviz = LaunchConfiguration("use_rviz")
    log_settings = LaunchConfiguration("log_settings", default="true")
    # Declare the launch arguments
    declare_map_yaml_cmd = DeclareLaunchArgument(
        "map",
        default_value=os.path.join(carter_nav2_bringup_dir, "maps", ENV_MAP_FILE),
        description="Full path to map file to load",
    )
    # Each robot gets its own parameter file (declared as "<name>_params_file")
    # so namespaced topics and frames can differ between robots.
    declare_robot1_params_file_cmd = DeclareLaunchArgument(
        "carter1_params_file",
        default_value=os.path.join(
            carter_nav2_bringup_dir, "params", "office", "multi_robot_carter_navigation_params_1.yaml"
        ),
        description="Full path to the ROS2 parameters file to use for robot1 launched nodes",
    )
    declare_robot2_params_file_cmd = DeclareLaunchArgument(
        "carter2_params_file",
        default_value=os.path.join(
            carter_nav2_bringup_dir, "params", "office", "multi_robot_carter_navigation_params_2.yaml"
        ),
        description="Full path to the ROS2 parameters file to use for robot2 launched nodes",
    )
    declare_robot3_params_file_cmd = DeclareLaunchArgument(
        "carter3_params_file",
        default_value=os.path.join(
            carter_nav2_bringup_dir, "params", "office", "multi_robot_carter_navigation_params_3.yaml"
        ),
        description="Full path to the ROS2 parameters file to use for robot3 launched nodes",
    )
    declare_bt_xml_cmd = DeclareLaunchArgument(
        "default_bt_xml_filename",
        default_value=os.path.join(
            get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml"
        ),
        description="Full path to the behavior tree xml file to use",
    )
    declare_autostart_cmd = DeclareLaunchArgument(
        "autostart", default_value="True", description="Automatically startup the stacks"
    )
    declare_rviz_config_file_cmd = DeclareLaunchArgument(
        "rviz_config", default_value=rviz_config_dir, description="Full path to the RVIZ config file to use."
    )
    declare_use_rviz_cmd = DeclareLaunchArgument("use_rviz", default_value="True", description="Whether to start RVIZ")
    # Define commands for launching the navigation instances
    nav_instances_cmds = []
    for robot in robots:
        # Resolves the per-robot params file declared above (e.g. "carter1_params_file").
        params_file = LaunchConfiguration(robot["name"] + "_params_file")
        group = GroupAction(
            [
                IncludeLaunchDescription(
                    PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")),
                    condition=IfCondition(use_rviz),
                    launch_arguments={
                        "namespace": TextSubstitution(text=robot["name"]),
                        "use_namespace": "True",
                        "rviz_config": rviz_config_file,
                    }.items(),
                ),
                IncludeLaunchDescription(
                    PythonLaunchDescriptionSource(
                        os.path.join(carter_nav2_bringup_dir, "launch", "carter_navigation_individual.launch.py")
                    ),
                    launch_arguments={
                        "namespace": robot["name"],
                        "use_namespace": "True",
                        "map": map_yaml_file,
                        "use_sim_time": use_sim_time,
                        "params_file": params_file,
                        "default_bt_xml_filename": default_bt_xml_filename,
                        "autostart": autostart,
                        "use_rviz": "False",
                        "use_simulator": "False",
                        "headless": "False",
                    }.items(),
                ),
                # Converts the robot's 3D lidar point cloud into a 2D LaserScan.
                Node(
                    package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node',
                    remappings=[('cloud_in', ['front_3d_lidar/point_cloud']),
                                ('scan', ['scan'])],
                    parameters=[{
                        'target_frame': 'front_3d_lidar',
                        'transform_tolerance': 0.01,
                        'min_height': -0.4,
                        'max_height': 1.5,
                        'angle_min': -1.5708,  # -M_PI/2
                        'angle_max': 1.5708,  # M_PI/2
                        'angle_increment': 0.0087,  # M_PI/360.0
                        'scan_time': 0.3333,
                        'range_min': 0.05,
                        'range_max': 100.0,
                        'use_inf': True,
                        'inf_epsilon': 1.0,
                        # 'concurrency_level': 1,
                    }],
                    name='pointcloud_to_laserscan',
                    namespace = robot["name"]
                ),
                LogInfo(condition=IfCondition(log_settings), msg=["Launching ", robot["name"]]),
                LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " map yaml: ", map_yaml_file]),
                LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " params yaml: ", params_file]),
                LogInfo(
                    condition=IfCondition(log_settings),
                    msg=[robot["name"], " behavior tree xml: ", default_bt_xml_filename],
                ),
                LogInfo(
                    condition=IfCondition(log_settings), msg=[robot["name"], " rviz config file: ", rviz_config_file]
                ),
                LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " autostart: ", autostart]),
            ]
        )
        nav_instances_cmds.append(group)
    # Create the launch description and populate
    ld = LaunchDescription()
    # Declare the launch options
    ld.add_action(declare_map_yaml_cmd)
    ld.add_action(declare_robot1_params_file_cmd)
    ld.add_action(declare_robot2_params_file_cmd)
    ld.add_action(declare_robot3_params_file_cmd)
    ld.add_action(declare_bt_xml_cmd)
    ld.add_action(declare_use_rviz_cmd)
    ld.add_action(declare_autostart_cmd)
    ld.add_action(declare_rviz_config_file_cmd)
    for simulation_instance_cmd in nav_instances_cmds:
        ld.add_action(simulation_instance_cmd)
    return ld
| 8,330 | Python | 42.390625 | 120 | 0.601441 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/maps/carter_office_navigation.yaml | image: carter_office_navigation.png
resolution: 0.05
origin: [-29.975, -39.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 139 | YAML | 18.999997 | 35 | 0.733813 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/maps/carter_hospital_navigation.yaml | image: carter_hospital_navigation.png
resolution: 0.05
origin: [-49.625, -4.675, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 140 | YAML | 19.142854 | 37 | 0.735714 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/maps/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-11.975, -17.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/setup.py | from setuptools import setup
from glob import glob
import os
package_name = "isaac_ros_navigation_goal"

# ament_python packaging for the navigation-goal node.
setup(
    name=package_name,
    version="0.0.1",
    # Ship both the top-level package and its goal_generators subpackage.
    packages=[package_name, package_name + "/goal_generators"],
    data_files=[
        # ament resource-index marker so ROS 2 tooling can discover the package.
        ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
        ("share/" + package_name, ["package.xml"]),
        # Install launch files and bundled assets (map yaml, goals file).
        (os.path.join("share", package_name, "launch"), glob("launch/*.launch.py")),
        ("share/" + package_name + "/assets", glob("assets/*")),
    ],
    install_requires=["setuptools"],
    zip_safe=True,
    maintainer="isaac sim",
    maintainer_email="[email protected]",
    description="Package to set goals for navigation stack.",
    license="NVIDIA Isaac ROS Software License",
    tests_require=["pytest"],
    # Console entry point: `ros2 run isaac_ros_navigation_goal SetNavigationGoal`.
    entry_points={"console_scripts": ["SetNavigationGoal = isaac_ros_navigation_goal.set_goal:main"]},
)
| 906 | Python | 33.884614 | 102 | 0.651214 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/test/test_flake8.py | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
    """Run ament's flake8 linter over the package and fail on any finding."""
    return_code, style_errors = main_with_errors(argv=[])
    error_report = "Found %d code style errors / warnings:\n" % len(style_errors) + "\n".join(style_errors)
    assert return_code == 0, error_report
| 864 | Python | 35.041665 | 96 | 0.741898 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/test/test_pep257.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run ament's pep257 docstring-style checker over the sources and tests."""
    return_code = main(argv=[".", "test"])
    failure_message = "Found code style errors / warnings"
    assert return_code == 0, failure_message
| 803 | Python | 32.499999 | 74 | 0.743462 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/launch/isaac_ros_navigation_goal.launch.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Launch the SetNavigationGoal node with warehouse-map defaults.

    The node sends ``iteration_count`` goals (generated by
    ``goal_generator_type``) to the ``navigate_to_pose`` action server,
    publishing ``initial_pose`` first.
    """
    # Map yaml used by RandomGoalGenerator to sample obstacle-free poses.
    map_yaml_file = LaunchConfiguration(
        "map_yaml_path",
        default=os.path.join(
            get_package_share_directory("isaac_ros_navigation_goal"), "assets", "carter_warehouse_navigation.yaml"
        ),
    )
    # Goals file; only consumed when goal_generator_type == "GoalReader".
    goal_text_file = LaunchConfiguration(
        "goal_text_file_path",
        default=os.path.join(get_package_share_directory("isaac_ros_navigation_goal"), "assets", "goals.txt"),
    )
    navigation_goal_node = Node(
        name="set_navigation_goal",
        package="isaac_ros_navigation_goal",
        executable="SetNavigationGoal",
        parameters=[
            {
                "map_yaml_path": map_yaml_file,
                "iteration_count": 3,
                "goal_generator_type": "RandomGoalGenerator",
                "action_server_name": "navigate_to_pose",
                "obstacle_search_distance_in_meters": 0.2,
                "goal_text_file_path": goal_text_file,
                # Layout consumed by SetNavigationGoal: [x, y, z, qx, qy, qz, qw].
                "initial_pose": [-6.4, -1.04, 0.0, 0.0, 0.0, 0.99, 0.02],
            }
        ],
        output="screen",
    )
    return LaunchDescription([navigation_goal_node])
| 1,782 | Python | 35.387754 | 114 | 0.654882 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/obstacle_map.py | import numpy as np
import yaml
import os
import math
from PIL import Image
class GridMap:
    """
    Occupancy-grid wrapper around a ROS map (yaml metadata + image).

    Loads the map image referenced by the yaml file into a boolean grid
    (True => occupied, False => free) and answers clearance queries for
    candidate poses expressed in meters in the map frame.
    """

    def __init__(self, yaml_file_path):
        """
        Parameters
        ----------
        yaml_file_path: path of the map yaml file (references the map image).
        """
        self.__get_meta_from_yaml(yaml_file_path)
        self.__get_raw_map()
        self.__add_max_range_to_meta()

    def __get_meta_from_yaml(self, yaml_file_path):
        """
        Reads map meta from the yaml file.

        Parameters
        ----------
        yaml_file_path: path of the yaml file.
        """
        with open(yaml_file_path, "r") as f:
            file_content = f.read()
        self.__map_meta = yaml.safe_load(file_content)
        # The image path in the yaml is relative to the yaml file's directory.
        self.__map_meta["image"] = os.path.join(os.path.dirname(yaml_file_path), self.__map_meta["image"])

    def __get_raw_map(self):
        """
        Reads the map image and generates the grid map.

        Grid map is a 2D boolean matrix where True => occupied space and
        False => free space.
        """
        # NOTE(review): uses channel 0, so this assumes a multi-channel image —
        # confirm the exported map PNGs are not single-channel grayscale.
        img = np.array(Image.open(self.__map_meta.get("image")))
        # Normalize intensity to [0, 1]; "negate" selects whether bright pixels
        # mean free or occupied, matching the original branch logic.
        if self.__map_meta["negate"]:
            occupancy = (img / 255)[:, :, 0]
        else:
            occupancy = ((255 - img) / 255)[:, :, 0]
        # Anything greater than free_thresh is considered occupied.
        # Vectorized comparison replaces the original per-pixel Python loop.
        self.__grid_map = occupancy > self.__map_meta["free_thresh"]

    def __add_max_range_to_meta(self):
        """
        Calculates and adds the max pose values in the x & y directions to the meta.
        """
        max_x = self.__grid_map.shape[1] * self.__map_meta["resolution"] + self.__map_meta["origin"][0]
        max_y = self.__grid_map.shape[0] * self.__map_meta["resolution"] + self.__map_meta["origin"][1]
        self.__map_meta["max_x"] = round(max_x, 2)
        self.__map_meta["max_y"] = round(max_y, 2)

    def __pad_obstacles(self, distance):
        # Placeholder: obstacle inflation is not implemented; clearance is
        # instead enforced per-query in is_valid_pose().
        pass

    def get_range(self):
        """
        Returns the bounds of pose values in the x & y directions.

        Returns
        -------
        [List]:
            list[0][0]: min value in x direction
            list[0][1]: max value in x direction
            list[1][0]: min value in y direction
            list[1][1]: max value in y direction
        """
        return [
            [self.__map_meta["origin"][0], self.__map_meta["max_x"]],
            [self.__map_meta["origin"][1], self.__map_meta["max_y"]],
        ]

    def __transform_to_image_coordinates(self, point):
        """
        Transforms a pose in meters to image pixel coordinates.

        Parameters
        ----------
        point: a point as list, where list[0] => pose.x and list[1] => pose.y

        Returns
        -------
        [Tuple]: tuple[0] => pixel value in x direction, i.e. column index.
                 tuple[1] => pixel value in y direction, i.e. row index.
        """
        p_x, p_y = point
        i_x = math.floor((p_x - self.__map_meta["origin"][0]) / self.__map_meta["resolution"])
        i_y = math.floor((p_y - self.__map_meta["origin"][1]) / self.__map_meta["resolution"])
        # because origin in yaml is at bottom left of image
        # (kept as shape[0] - i_y to preserve the original behavior)
        i_y = self.__grid_map.shape[0] - i_y
        return i_x, i_y

    def __transform_distance_to_pixels(self, distance):
        """
        Converts a distance in meters to a number of pixels based on the map resolution.

        Parameters
        ----------
        distance: value in meters

        Returns
        -------
        [Integer]: number of pixels representing the same distance.
        """
        return math.ceil(distance / self.__map_meta["resolution"])

    def __is_obstacle_in_distance(self, img_point, distance):
        """
        Checks if any obstacle is in the vicinity of the given image point.

        Parameters
        ----------
        img_point: pixel coordinates (column, row) of the point
        distance: distance in pixels within which there shouldn't be any obstacle.

        Returns
        -------
        [Bool]: True if any obstacle found, else False.
        """
        # need to make sure that patch xmin & ymin are >= 0,
        # because of python's negative indexing capability
        row_start_idx = 0 if img_point[1] - distance < 0 else img_point[1] - distance
        col_start_idx = 0 if img_point[0] - distance < 0 else img_point[0] - distance
        # image point acts as the center of the square, where each side of the
        # square is of size 2 x distance
        patch = self.__grid_map[row_start_idx : img_point[1] + distance, col_start_idx : img_point[0] + distance]
        # any() replaces the original np.where + len() check (same result, no temporaries)
        return bool(patch.any())

    def is_valid_pose(self, point, distance=0.2):
        """
        Checks if a given pose keeps at least "distance" meters from any obstacle.

        Parameters
        ----------
        point: pose in 2D space, where point[0] = pose.x and point[1] = pose.y
        distance: clearance distance in meters.

        Returns
        -------
        [Bool]: True if pose is valid, else False
        """
        assert len(point) == 2
        img_point = self.__transform_to_image_coordinates(point)
        img_pixel_distance = self.__transform_distance_to_pixels(distance)
        # Pose is not valid if there is an obstacle in the vicinity
        return not self.__is_obstacle_in_distance(img_point, img_pixel_distance)
| 5,443 | Python | 33.455696 | 113 | 0.553188 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/set_goal.py | import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from nav2_msgs.action import NavigateToPose
from .obstacle_map import GridMap
from .goal_generators import RandomGoalGenerator, GoalReader
import sys
from geometry_msgs.msg import PoseWithCovarianceStamped
import time
class SetNavigationGoal(Node):
    """ROS 2 node that repeatedly sends navigation goals to a Nav2 action server.

    Goals come from a pluggable generator (random poses sampled from a map, or
    poses read from a text file) and are sent one at a time: the next goal is
    dispatched from the result callback of the previous one, up to
    ``iteration_count`` goals in total.
    """

    def __init__(self):
        super().__init__("set_navigation_goal")
        # Declare all parameters up front; None defaults mean "must be provided"
        # for the generator that needs them (checked in __create_goal_generator).
        self.declare_parameters(
            namespace="",
            parameters=[
                ("iteration_count", 1),
                ("goal_generator_type", "RandomGoalGenerator"),
                ("action_server_name", "navigate_to_pose"),
                ("obstacle_search_distance_in_meters", 0.2),
                ("frame_id", "map"),
                ("map_yaml_path", None),
                ("goal_text_file_path", None),
                ("initial_pose", None),
            ],
        )
        self.__goal_generator = self.__create_goal_generator()
        action_server_name = self.get_parameter("action_server_name").value
        self._action_client = ActionClient(self, NavigateToPose, action_server_name)
        self.MAX_ITERATION_COUNT = self.get_parameter("iteration_count").value
        assert self.MAX_ITERATION_COUNT > 0
        self.curr_iteration_count = 1
        # Publisher used once to seed the initial pose estimate on /initialpose.
        self.__initial_goal_publisher = self.create_publisher(PoseWithCovarianceStamped, "/initialpose", 1)
        self.__initial_pose = self.get_parameter("initial_pose").value
        # When no initial pose is configured, skip publishing it altogether.
        self.__is_initial_pose_sent = True if self.__initial_pose is None else False

    def __send_initial_pose(self):
        """
        Publishes the initial pose.

        This function is only called once, before any goal pose is sent to the
        action server. Expects the ``initial_pose`` parameter to be laid out as
        [x, y, z, qx, qy, qz, qw].
        """
        goal = PoseWithCovarianceStamped()
        goal.header.frame_id = self.get_parameter("frame_id").value
        goal.header.stamp = self.get_clock().now().to_msg()
        goal.pose.pose.position.x = self.__initial_pose[0]
        goal.pose.pose.position.y = self.__initial_pose[1]
        goal.pose.pose.position.z = self.__initial_pose[2]
        goal.pose.pose.orientation.x = self.__initial_pose[3]
        goal.pose.pose.orientation.y = self.__initial_pose[4]
        goal.pose.pose.orientation.z = self.__initial_pose[5]
        goal.pose.pose.orientation.w = self.__initial_pose[6]
        self.__initial_goal_publisher.publish(goal)

    def send_goal(self):
        """
        Sends the next goal to the action server.

        On the first call this may also publish the initial pose and wait for
        it to take effect before dispatching the first navigation goal.
        """
        if not self.__is_initial_pose_sent:
            self.get_logger().info("Sending initial pose")
            self.__send_initial_pose()
            self.__is_initial_pose_sent = True
            # Assumption is that initial pose is set after publishing first time in this duration.
            # Can be changed to more sophisticated way. e.g. /particlecloud topic has no msg until
            # the initial pose is set.
            time.sleep(10)
            self.get_logger().info("Sending first goal")
        self._action_client.wait_for_server()
        goal_msg = self.__get_goal()
        # No goal could be generated: shut down instead of sending nothing.
        if goal_msg is None:
            rclpy.shutdown()
            sys.exit(1)
        self._send_goal_future = self._action_client.send_goal_async(
            goal_msg, feedback_callback=self.__feedback_callback
        )
        self._send_goal_future.add_done_callback(self.__goal_response_callback)

    def __goal_response_callback(self, future):
        """
        Callback function to check the response (goal accepted/rejected) from the server.

        If the goal is rejected it stops the execution for now. (We can change
        this to resample the pose if rejected.)
        """
        goal_handle = future.result()
        if not goal_handle.accepted:
            self.get_logger().info("Goal rejected :(")
            rclpy.shutdown()
            return
        self.get_logger().info("Goal accepted :)")
        self._get_result_future = goal_handle.get_result_async()
        self._get_result_future.add_done_callback(self.__get_result_callback)

    def __get_goal(self):
        """
        Get the next goal from the goal generator.

        Returns
        -------
        [NavigateToPose][goal] or None if the next goal couldn't be generated.
        """
        goal_msg = NavigateToPose.Goal()
        goal_msg.pose.header.frame_id = self.get_parameter("frame_id").value
        goal_msg.pose.header.stamp = self.get_clock().now().to_msg()
        pose = self.__goal_generator.generate_goal()
        # couldn't sample a pose which is not close to obstacles. Rare but might happen in dense maps.
        if pose is None:
            self.get_logger().error(
                "Could not generate next goal. Returning. Possible reasons for this error could be:"
            )
            self.get_logger().error(
                "1. If you are using GoalReader then please make sure iteration count <= number of goals avaiable in file."
            )
            self.get_logger().error(
                "2. If RandomGoalGenerator is being used then it was not able to sample a pose which is given distance away from the obstacles."
            )
            return
        self.get_logger().info("Generated goal pose: {0}".format(pose))
        # Generated pose layout: [x, y, qx, qy, qz, qw].
        goal_msg.pose.pose.position.x = pose[0]
        goal_msg.pose.pose.position.y = pose[1]
        goal_msg.pose.pose.orientation.x = pose[2]
        goal_msg.pose.pose.orientation.y = pose[3]
        goal_msg.pose.pose.orientation.z = pose[4]
        goal_msg.pose.pose.orientation.w = pose[5]
        return goal_msg

    def __get_result_callback(self, future):
        """
        Callback to check the action result.

        It calls the send_goal() function while the current goal count is below
        the required goal count; otherwise it shuts down rclpy.
        """
        # Nav2 is sending empty message for success as well as for failure.
        result = future.result().result
        self.get_logger().info("Result: {0}".format(result.result))
        if self.curr_iteration_count < self.MAX_ITERATION_COUNT:
            self.curr_iteration_count += 1
            self.send_goal()
        else:
            rclpy.shutdown()

    def __feedback_callback(self, feedback_msg):
        """
        This is the feedback callback. We can compare/compute/log while the robot is on its way to the goal.
        """
        # self.get_logger().info('FEEDBACK: {}\n'.format(feedback_msg))
        pass

    def __create_goal_generator(self):
        """
        Creates the goal generator object selected by the ``goal_generator_type``
        parameter ("RandomGoalGenerator" or "GoalReader").

        Exits the process if the selected generator's required parameters are
        missing or if the type is unknown.
        """
        goal_generator_type = self.get_parameter("goal_generator_type").value
        goal_generator = None
        if goal_generator_type == "RandomGoalGenerator":
            if self.get_parameter("map_yaml_path").value is None:
                self.get_logger().info("Yaml file path is not given. Returning..")
                sys.exit(1)
            yaml_file_path = self.get_parameter("map_yaml_path").value
            grid_map = GridMap(yaml_file_path)
            obstacle_search_distance_in_meters = self.get_parameter("obstacle_search_distance_in_meters").value
            assert obstacle_search_distance_in_meters > 0
            goal_generator = RandomGoalGenerator(grid_map, obstacle_search_distance_in_meters)
        elif goal_generator_type == "GoalReader":
            if self.get_parameter("goal_text_file_path").value is None:
                self.get_logger().info("Goal text file path is not given. Returning..")
                sys.exit(1)
            file_path = self.get_parameter("goal_text_file_path").value
            goal_generator = GoalReader(file_path)
        else:
            self.get_logger().info("Invalid goal generator specified. Returning...")
            sys.exit(1)
        return goal_generator
def main():
    """Entry point: initialize rclpy, send the first goal, and spin until shutdown."""
    rclpy.init()
    set_goal = SetNavigationGoal()
    # send_goal() returns nothing (the original bound its None result to an
    # unused local); subsequent goals are chained from the action-result
    # callbacks serviced by rclpy.spin().
    set_goal.send_goal()
    rclpy.spin(set_goal)
if __name__ == "__main__":
main()
| 7,971 | Python | 37.887805 | 144 | 0.605696 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_reader.py | from .goal_generator import GoalGenerator
class GoalReader(GoalGenerator):
    """Goal generator that replays poses from a text file, one per line."""

    def __init__(self, file_path):
        """
        Parameters
        ----------
        file_path: path to a text file with one goal per line, written as
            space-separated floats (consumed as [x, y, qx, qy, qz, qw]).
        """
        self.__file_path = file_path
        self.__generator = self.__get_goal()

    def generate_goal(self, max_num_of_trials=1000):
        """
        Return the next goal from the file, or None when the file is exhausted.

        max_num_of_trials is unused; it exists to match the GoalGenerator interface.
        """
        try:
            return next(self.__generator)
        except StopIteration:
            return None

    def __get_goal(self):
        # Use a context manager so the file handle is closed deterministically
        # once all goals are consumed (the original relied on GC to close it).
        with open(self.__file_path, "r") as goal_file:
            for row in goal_file:
                yield list(map(float, row.strip().split(" ")))
| 486 | Python | 26.055554 | 58 | 0.584362 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/random_goal_generator.py | import numpy as np
from .goal_generator import GoalGenerator
class RandomGoalGenerator(GoalGenerator):
    """
    Goal generator that samples random obstacle-free poses from a grid map.

    Parameters
    ----------
    grid_map: GridMap object used to bound the sampling range and validate poses.
    distance: clearance in meters a sampled pose must keep from obstacles.
    """

    def __init__(self, grid_map, distance):
        self.__grid_map = grid_map
        self.__distance = distance

    def generate_goal(self, max_num_of_trials=1000):
        """
        Generate a random valid goal pose.

        Parameters
        ----------
        max_num_of_trials: maximum number of samples drawn before giving up
            when sampled poses keep landing too close to obstacles.

        Returns
        -------
        [List][Pose]: Pose in format
            [pose.x, pose.y, orientation.x, orientation.y, orientation.z, orientation.w],
            or None if no valid pose was found within max_num_of_trials.
        """
        range_ = self.__grid_map.get_range()
        for _ in range(max_num_of_trials):
            x = np.random.uniform(range_[0][0], range_[0][1])
            y = np.random.uniform(range_[1][0], range_[1][1])
            if not self.__grid_map.is_valid_pose([x, y], self.__distance):
                continue
            # Sample a random orientation and normalize it: downstream
            # consumers (TF / Nav2) require unit quaternions, which the
            # original unscaled [0, 1) component samples did not guarantee.
            quat = np.random.uniform(0, 1, size=4)
            norm = np.linalg.norm(quat)
            if norm == 0.0:  # astronomically unlikely; avoid division by zero
                continue
            quat /= norm
            return [x, y, quat[0], quat[1], quat[2], quat[3]]
        return None
| 1,405 | Python | 30.954545 | 107 | 0.560854 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/__init__.py | from .random_goal_generator import RandomGoalGenerator
from .goal_reader import GoalReader
| 91 | Python | 29.666657 | 54 | 0.857143 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_generator.py | from abc import ABC, abstractmethod
class GoalGenerator(ABC):
    """
    Abstract base class for goal generators.

    Concrete implementations (e.g. RandomGoalGenerator, GoalReader) must
    implement generate_goal().
    """

    def __init__(self):
        pass

    @abstractmethod
    def generate_goal(self, max_num_of_trials=2000):
        """
        Generate the next goal.

        Parameters
        ----------
        max_num_of_trials: maximum number of attempts to produce a valid pose
            when generated poses may be rejected (implementation dependent).

        Returns
        -------
        [List][Pose]: Pose in format
            [pose.x, pose.y, orientation.x, orientation.y, orientation.z, orientation.w],
            or None if no goal could be generated.
        """
        pass
| 582 | Python | 21.423076 | 107 | 0.580756 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/assets/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-11.975, -17.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/package.xml | <package>
<name>cortex_control</name>
<version>1.0.0</version>
<description>NVIDIA Isaac Cortex Control ROS package.</description>
<author>Nathan Ratliff</author>
<maintainer email="[email protected]">Nathan Ratliff</maintainer>
<license>See LICENSE file.</license>
<url type="Documentation">https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html</url>
<url type="Forums">https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/simulation</url>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>message_generation</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>rospy</build_depend>
<build_depend>roslib</build_depend>
<build_depend>rosbag</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>robot_state_publisher</build_depend>
<build_depend>yaml-cpp</build_depend>
<build_depend>libgflags-dev</build_depend>
<run_depend>message_runtime</run_depend>
<run_depend>roscpp</run_depend>
<run_depend>rospy</run_depend>
<run_depend>roslib</run_depend>
<run_depend>rosbag</run_depend>
<run_depend>std_msgs</run_depend>
<run_depend>robot_state_publisher</run_depend>
<run_depend>yaml-cpp</run_depend>
<run_depend>libgflags-dev</run_depend>
</package>
| 1,286 | XML | 34.749999 | 107 | 0.74028 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/state.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/math/state.h"
#include <Eigen/Core>
#include <ros/assert.h>
namespace cortex {
namespace math {
// Zero state of dimension n: 2n-vector with both halves zeroed.
State::State(int n) : state(Eigen::VectorXd::Zero(2 * n)) {}

// Builds the stacked state s = (x, xd); the two inputs must have equal size.
State::State(const Eigen::VectorXd &x, const Eigen::VectorXd &xd) : State(x.size()) {
  ROS_ASSERT(x.size() == xd.size());
  // Given the size assertion, x.size() == dim(), so these writes are the
  // position / velocity halves of the stacked vector.
  state.head(x.size()) = x;
  state.tail(xd.size()) = xd;
}
} // namespace math
} // namespace cortex
| 820 | C++ | 28.321428 | 85 | 0.717073 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/state.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <Eigen/Core>
namespace cortex {
namespace math {
// Represents a vector s = (x, xd) \in \R^{2d} where d is the space dim.
class State {
 public:
  State() = delete;
  explicit State(int n);  // Initialize to the zero state (0,0)\in\R^n X \R^n.
  State(const Eigen::VectorXd &x, const Eigen::VectorXd &xd);

  // Mutable views into the position / velocity halves of the stacked state.
  Eigen::Ref<Eigen::VectorXd> pos() { return state.head(dim()); }
  Eigen::Ref<Eigen::VectorXd> vel() { return state.tail(dim()); }
  Eigen::Ref<Eigen::VectorXd> vector() { return state; }

  // Read-only views.
  Eigen::Ref<const Eigen::VectorXd> pos() const { return state.head(dim()); }
  Eigen::Ref<const Eigen::VectorXd> vel() const { return state.tail(dim()); }
  Eigen::Ref<const Eigen::VectorXd> vector() const { return state; }

  // Space dimension d; the stacked vector itself has 2d entries.
  // Explicit cast avoids an implicit Eigen::Index -> int narrowing warning.
  int dim() const { return static_cast<int>(state.size()) / 2; }

  // Returns one integration step forward without modifying *this
  // (const so it can be called on const States).
  //
  // Equations:
  //   x_next = x + dt xd
  //   xd_next = xd + dt xdd
  State Step(double dt, const Eigen::VectorXd &xdd) const {
    return State(pos() + dt * vel(), vel() + dt * xdd);
  }

 private:
  Eigen::VectorXd state;
};
} // namespace math
} // namespace cortex
| 1,558 | C | 30.816326 | 78 | 0.675866 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/interpolator.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
// Pure virtual base class interface for an interpolator.
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <ros/assert.h>
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/time_scaled_interpolator.h"
namespace cortex {
namespace math {
// Represents a generic interpolator interface giving an API of the form:
//
// 1. Add p = (q, qd, qdd) point at time t:
//
// interp.AddPt(t, p);
//
// 2. Evaluate at a given time t:
//
// auto p = interp.Eval(t);
// auto p = interp(t);
//
// Deriving classes need to implement the pure virtual functions
//
// AddPt() and Eval()
//
// Deriving classes might add additional restrictions, such as monotonicity of add
// times t (IncrementalInterpolator).
template <class vec_t>
class Interpolator {
 public:
  typedef vec_t VectorXx;

  // Polymorphic base: deleting through an Interpolator* requires a virtual
  // destructor.
  virtual ~Interpolator() = default;

  // Adds the point p = (x, xd, xdd) at time t. Returns false (and appends to
  // error_str when provided) on failure.
  virtual bool AddPt(double t, const PosVelAcc<VectorXx>& p, std::string* error_str = nullptr) = 0;

  // Evaluates at time t into the ret return parameter. Returns false (and
  // sets error_str when provided) on failure.
  virtual bool Eval(double t, PosVelAcc<VectorXx>& ret, std::string* error_str) const = 0;

  // Asserting version: dies (ROS_ASSERT) on evaluation failure.
  // Uses PosVelAcc<VectorXx> rather than the concrete PosVelAccXd so this
  // compiles for any vec_t instantiation, not just Eigen::VectorXd.
  PosVelAcc<VectorXx> Eval(double t) const {
    std::string error_str;
    PosVelAcc<VectorXx> p;
    ROS_ASSERT_MSG(Eval(t, p, &error_str), "%s", error_str.c_str());
    return p;
  }

  // Position-only evaluation.
  VectorXx operator()(double t) const { return Eval(t).x; }
};
} // namespace math
} // namespace cortex
| 1,825 | C | 26.253731 | 99 | 0.706849 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/cubic_position_interpolator.cpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cortex/math/interpolation/cubic_position_interpolator.h"
#include <fstream>
#include <string>
#include <vector>
#include <Eigen/Dense>
namespace cortex {
namespace math {
// Returns true iff t lies in the closed interval [0, 1].
inline bool InZeroOne(double t) { return t >= 0.0 && t <= 1.0; }
// clang-format off
#define CUBIC_POSITION_INTERP_MATRIX \
0, 0, 0, 1, \
0, 0, 1, 0, \
0, 2, 0, 0, \
1, 1, 1, 1
// clang-format on
// Solves the 4x4 linear system A * coeffs = b for the cubic's coefficients
// (a3, a2, a1, a0). The rows of A encode, at t = 0: position p0.x, velocity
// p0.xd, acceleration p0.xdd; and at t = 1: position p1.x only (the endpoint
// velocity/acceleration are unconstrained -- position-only interpolation).
// NOTE(review): the initializer list relies on A_ and b_ being declared
// before coeffs_ in the header, since coeffs_ is computed from both.
CubicPositionInterpolator1d::CubicPositionInterpolator1d(const PosVelAcc1d& p0,
                                                         const PosVelAcc1d& p1,
                                                         bool validate_interpolation_evals)
    : validate_interpolation_evals_(validate_interpolation_evals),
      A_((Eigen::MatrixXd(4, 4) << CUBIC_POSITION_INTERP_MATRIX).finished()),
      b_((Eigen::VectorXd(4) << p0.x, p0.xd, p0.xdd, p1.x).finished()),
      coeffs_(A_.colPivHouseholderQr().solve(b_)) {}
// Evaluates the cubic (position, velocity, acceleration) at t into ret.
// When validate_interpolation_evals_ is set, t must lie in [0, 1]; otherwise
// the call fails, returning false and appending to error_str when provided.
bool CubicPositionInterpolator1d::Eval(double t, PosVelAcc1d& ret, std::string* error_str) const {
  if (validate_interpolation_evals_ && !InZeroOne(t)) {
    std::stringstream ss;
    ss << "t not in [0,1] (t = " << t << "). ";
    if (error_str) {
      *error_str += ss.str();
    }
    return false;
  }

  // coeffs_ stores (a3, a2, a1, a0) with x(t) = a3 t^3 + a2 t^2 + a1 t + a0.
  const double a3 = coeffs_[0];
  const double a2 = coeffs_[1];
  const double a1 = coeffs_[2];
  const double a0 = coeffs_[3];

  // Powers of t as plain scalars -- the previous std::vector<double> version
  // heap-allocated on every call. Products are formed in the same order as
  // before (t^3 = t * t^2), so results are bit-identical.
  const double t2 = t * t;
  const double t3 = t * t2;

  const double x = a3 * t3 + a2 * t2 + a1 * t + a0;
  const double xd = 3. * a3 * t2 + 2. * a2 * t + a1;   // dx/dt
  const double xdd = 6. * a3 * t + 2. * a2;            // d2x/dt2
  ret = PosVelAcc1d(x, xd, xdd);
  return true;
}
} // namespace math
} // namespace cortex
| 2,211 | C++ | 30.6 | 98 | 0.61194 |
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/cubic_position_interpolator.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <Eigen/Core>
#include <ros/assert.h>
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/time_scaled_interpolator.h"
#include "cortex/math/interpolation/trajectories.h"
namespace cortex {
namespace math {
// One-dimensional cubic interpolating polynomial. Interpolates between (x0,
// xd0) and (x1, xd1).
class CubicPositionInterpolator1d {
 public:
  typedef double VectorXx;

  CubicPositionInterpolator1d() {}

  // Creates a cubic spline that interpolates between p0 and p1 at t = 0 and
  // 1, respectively.
  CubicPositionInterpolator1d(const PosVelAcc1d& p0,
                              const PosVelAcc1d& p1,
                              bool validate_interpolation_evals = false);

  // Evaluates the polynomial at t, writing the result into the ret return
  // parameter. When validate_interpolation_evals is set, evaluations must be
  // interpolating (t in [0, 1]); otherwise the call fails, returning false
  // and setting error_str when provided.
  bool Eval(double t, PosVelAcc1d& ret, std::string* error_str = nullptr) const;

  // Asserting variant: dies (ROS_ASSERT) on evaluation failure.
  PosVelAcc1d Eval(double t) const {
    std::string error_str;
    PosVelAcc1d result;
    ROS_ASSERT_MSG(Eval(t, result, &error_str), "%s", error_str.c_str());
    return result;
  }

  // Position-only evaluation.
  double operator()(double t) const { return Eval(t).x; }

  // Accessor.
  const Eigen::VectorXd& coeffs() const { return coeffs_; }

 protected:
  bool validate_interpolation_evals_;
  const Eigen::MatrixXd A_;
  const Eigen::VectorXd b_;
  const Eigen::VectorXd coeffs_;
};
// Convenience factory: builds a multi-dimensional cubic interpolator where
// each dimension of the start/end points is interpolated independently by a
// CubicPositionInterpolator1d.
template <class vec_t>
MultiDimInterp<CubicPositionInterpolator1d, vec_t> CubicInterpolator(
    const PosVelAcc<vec_t>& start,
    const PosVelAcc<vec_t>& end,
    bool validate_interpolation_evals = false) {
  return MultiDimInterp<CubicPositionInterpolator1d, vec_t>(
      start, end, validate_interpolation_evals);
}
typedef MultiDimInterp<CubicPositionInterpolator1d, Eigen::VectorXd> CubicPositionInterpolatorXd;
} // namespace math
} // namespace cortex
| 2,646 | C | 30.511904 | 98 | 0.721088 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.