file_path (string, 20-207 chars) | content (string, 5-3.85M chars) | size (int64, 5-3.85M) | lang (string, 9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93) |
---|---|---|---|---|---|---|
xibeisiber/orbit.dual_arm/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/flat_env_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from .rough_env_cfg import AnymalDRoughEnvCfg
@configclass
class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# override rewards
self.rewards.flat_orientation_l2.weight = -5.0
self.rewards.dof_torques_l2.weight = -2.5e-5
self.rewards.feet_air_time.weight = 0.5
# change terrain to flat
self.scene.terrain.terrain_type = "plane"
self.scene.terrain.terrain_generator = None
# no height scan
self.scene.height_scanner = None
self.observations.policy.height_scan = None
# no terrain curriculum
self.curriculum.terrain_levels = None
class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing
self.randomization.base_external_force_torque = None
self.randomization.push_robot = None
| 1,382 | Python | 30.431817 | 60 | 0.656295 |
xibeisiber/orbit.dual_arm/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Template-Velocity-Flat-Anymal-D-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
},
)
gym.register(
id="Template-Velocity-Flat-Anymal-D-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
},
)
gym.register(
id="Template-Velocity-Rough-Anymal-D-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
},
)
gym.register(
id="Template-Velocity-Rough-Anymal-D-Play-v0",
entry_point="omni.isaac.orbit.envs:RLTaskEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
},
)
| 1,474 | Python | 26.830188 | 77 | 0.685889 |
xibeisiber/orbit.dual_arm/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/rsl_rl_cfg.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
RslRlOnPolicyRunnerCfg,
RslRlPpoActorCriticCfg,
RslRlPpoAlgorithmCfg,
)
@configclass
class AnymalDRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
num_steps_per_env = 24
max_iterations = 1500
save_interval = 50
experiment_name = "anymal_d_rough"
empirical_normalization = False
policy = RslRlPpoActorCriticCfg(
init_noise_std=1.0,
actor_hidden_dims=[512, 256, 128],
critic_hidden_dims=[512, 256, 128],
activation="elu",
)
algorithm = RslRlPpoAlgorithmCfg(
value_loss_coef=1.0,
use_clipped_value_loss=True,
clip_param=0.2,
entropy_coef=0.005,
num_learning_epochs=5,
num_mini_batches=4,
learning_rate=1.0e-3,
schedule="adaptive",
gamma=0.99,
lam=0.95,
desired_kl=0.01,
max_grad_norm=1.0,
)
@configclass
class AnymalDFlatPPORunnerCfg(AnymalDRoughPPORunnerCfg):
def __post_init__(self):
super().__post_init__()
self.max_iterations = 300
self.experiment_name = "anymal_d_flat"
self.policy.actor_hidden_dims = [128, 128, 128]
self.policy.critic_hidden_dims = [128, 128, 128]
| 1,417 | Python | 26.26923 | 58 | 0.645025 |
xibeisiber/orbit.dual_arm/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/__init__.py | # Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from . import rsl_rl_cfg # noqa: F401, F403
| 168 | Python | 23.142854 | 56 | 0.720238 |
xibeisiber/orbit.dual_arm/docs/CHANGELOG.rst | Changelog
---------
0.1.0 (2024-01-29)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Created an initial template for building an extension or project based on Orbit
| 155 | reStructuredText | 13.181817 | 81 | 0.593548 |
KazWong/omniverse_sample/README.md | - `ov_sample`: original samples from Omniverse
- `script_window`: samples for the Omniverse script editor
- `omnihelper`: samples for Python on the command line
`isaac` is the bash script that runs the Isaac Sim Python interpreter; replace the paths below with your path to isaac_sim-2021.1.1:
```
source /path/to/isaac_sim-2021.1.1/setup_python_env.sh
/path/to/isaac_sim-2021.1.1/python.sh "$@"
```
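For example, a hypothetical invocation (assuming the `isaac` wrapper script sits next to this README) that runs one of the bundled samples:
```
# run a bundled Python sample through the Isaac Sim interpreter
./isaac ov_sample/python_samples/jetbot/jetbot_train.py
```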
| 333 | Markdown | 26.833331 | 55 | 0.756757 |
KazWong/omniverse_sample/ov_sample/ros2_samples/navigation/carter_navigation/launch/carter_navigation.launch.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
use_sim_time = LaunchConfiguration("use_sim_time", default="True")
map_dir = LaunchConfiguration(
"map",
default=os.path.join(
get_package_share_directory("carter_navigation"), "maps", "carter_warehouse_navigation.yaml"
),
)
param_dir = LaunchConfiguration(
"params_file",
default=os.path.join(
get_package_share_directory("carter_navigation"), "params", "carter_navigation_params.yaml"
),
)
nav2_launch_file_dir = os.path.join(get_package_share_directory("nav2_bringup"), "launch")
rviz_config_dir = os.path.join(get_package_share_directory("carter_navigation"), "rviz2", "carter_navigation.rviz")
return LaunchDescription(
[
DeclareLaunchArgument("map", default_value=map_dir, description="Full path to map file to load"),
DeclareLaunchArgument(
"params_file", default_value=param_dir, description="Full path to param file to load"
),
DeclareLaunchArgument(
"use_sim_time", default_value="true", description="Use simulation (Omniverse Isaac Sim) clock if true"
),
Node(
package="rviz2",
executable="rviz2",
name="rviz2",
output="screen",
parameters=[{"use_sim_time": use_sim_time}],
arguments=["-d", rviz_config_dir],
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource([nav2_launch_file_dir, "/bringup_launch.py"]),
launch_arguments={"map": map_dir, "use_sim_time": use_sim_time, "params_file": param_dir}.items(),
),
]
)
| 2,578 | Python | 39.936507 | 119 | 0.66059 |
KazWong/omniverse_sample/ov_sample/ros2_samples/navigation/carter_navigation/launch/carter_visualization.launch.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
## This launch file is only used for separately running Rviz2 with the carter_navigation configuration.
def generate_launch_description():
use_sim_time = LaunchConfiguration("use_sim_time", default="true")
rviz_config_dir = os.path.join(get_package_share_directory("carter_navigation"), "rviz2", "carter_navigation.rviz")
return LaunchDescription(
[
DeclareLaunchArgument("use_sim_time", default_value="True", description="Flag to enable use_sim_time"),
Node(
package="rviz2",
executable="rviz2",
name="rviz2",
output="screen",
parameters=[{"use_sim_time": use_sim_time}],
arguments=["-d", rviz_config_dir],
),
]
)
| 1,468 | Python | 38.702702 | 119 | 0.69891 |
KazWong/omniverse_sample/ov_sample/ros2_samples/navigation/carter_navigation/maps/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-10.325, -12.225, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
KazWong/omniverse_sample/ov_sample/python_samples/environment.yml | name: isaac-sim
channels:
- pytorch
- defaults
dependencies:
- cudatoolkit=11.0
- matplotlib=3.1.3
- pip=20.3.3
- python=3.7
- pytorch
- requests=2.23.0
- torchaudio=0.7.2
- torchvision=0.8.2
- six=1.12.0
- pip:
- gym==0.17.3
- opencv-python==4.4.0.44
- pillow==8.2.0
- scipy==1.5.4
- stable-baselines3==0.10.0
- tensorboard==2.4.0
- tensorboard-plugin-wit==1.7.0
- tensorflow-estimator==2.3.0
- tensorflow-gpu==2.3.1 | 499 | YAML | 19.833333 | 36 | 0.563126 |
KazWong/omniverse_sample/ov_sample/python_samples/README.md | # Python samples
This folder contains samples that run inside the Omniverse Python loop.
## Built-in Python 3.7 environment
Navigate to the parent directory and execute:
```
./python.sh path/to/script.py
```
## Anaconda
Requires an Anaconda installation.
### Setup
Create and activate the conda environment
```
conda env create -f environment.yml
conda activate isaac-sim
```
Use the `setenv` script to add the Omniverse Kit Python environment to your active PYTHONPATH:
`source setenv.sh`
## How To Run
See the Isaac Sim documentation for how to run the samples in this folder
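As a concrete (hypothetical) example, using the built-in Python 3.7 environment and the `--headless` flag defined in `jetbot/jetbot_train.py`, the Jetbot training sample could be launched like this:
```
# from the Isaac Sim root, run the Jetbot RL training sample without a GUI
./python.sh path/to/python_samples/jetbot/jetbot_train.py --headless
```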
| 567 | Markdown | 16.212121 | 89 | 0.749559 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_train.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import numpy as np
import os
import carb
import signal
import json
import argparse
from argparse import Namespace
from omni.isaac.python_app import OmniKitHelper
from jetbot_model import CustomCNN
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback, CheckpointCallback
def train(args):
CUSTOM_CONFIG = {
"width": 224,
"height": 224,
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
omniverse_kit = OmniKitHelper(CUSTOM_CONFIG)
# need to construct OmniKitHelper before importing physics, etc
from jetbot_env import JetbotEnv
import omni.physx
# we disable all anti aliasing in the render because we want to train on the raw camera image.
omniverse_kit.set_setting("/rtx/post/aa/op", 0)
env = JetbotEnv(omniverse_kit, max_resets=args.rand_freq, updates_per_step=3, mirror_mode=args.mirror_mode)
checkpoint_callback = CheckpointCallback(
save_freq=args.save_freq, save_path="./params/", name_prefix=args.checkpoint_name
)
net_arch = [256, 256, dict(pi=[128, 64, 32], vf=[128, 64, 32])]
policy_kwargs = {"net_arch": net_arch, "activation_fn": torch.nn.ReLU}
if args.loaded_checkpoint == "":
model = PPO(
"CnnPolicy",
env,
verbose=1,
tensorboard_log=args.tensorboard_dir,
policy_kwargs=policy_kwargs,
device="cuda",
n_steps=args.step_freq,
batch_size=2048,
n_epochs=50,
learning_rate=0.0001,
)
else:
model = PPO.load(args.loaded_checkpoint, env)
model.learn(
total_timesteps=args.total_steps,
callback=checkpoint_callback,
eval_env=env,
eval_freq=args.eval_freq,
eval_log_path=args.evaluation_dir,
reset_num_timesteps=args.reset_num_timesteps,
)
model.save(args.checkpoint_name)
def runEval(args):
CUSTOM_CONFIG = {
"width": 224,
"height": 224,
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
# load a zip file to evaluate here
agent = PPO.load(args.evaluation_dir + "/best_model.zip", device="cuda")
omniverse_kit = OmniKitHelper(CUSTOM_CONFIG)
# need to construct OmniKitHelper before importing physics, etc
from jetbot_env import JetbotEnv
import omni.physx
# we disable all anti aliasing in the render because we want to train on the raw camera image.
omniverse_kit.set_setting("/rtx/post/aa/op", 0)
env = JetbotEnv(omniverse_kit, mirror_mode=args.mirror_mode)
obs = env.reset()
while True:
action = agent.predict(obs)
print(action)
obs, rew, done, infos = env.step(action[0])
if done:
obs = env.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--loaded_checkpoint", help="path to checkpoint to be loaded", default="", nargs="?", type=str)
parser.add_argument("-E", "--eval", help="evaluate checkpoint", action="store_true")
parser.add_argument(
"-R", "--reset_num_timesteps", help="reset the current timestep number (used in logging)", action="store_true"
)
parser.add_argument(
"-M", "--mirror_mode", help="reflect images and actions horizontally during training", action="store_true"
)
parser.add_argument("-H", "--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument(
"--checkpoint_name", help="name of checkpoint file (no suffix)", default="checkpoint_25k", type=str
)
parser.add_argument("--tensorboard_dir", help="path to tensorboard log directory", default="tensorboard", type=str)
parser.add_argument("--evaluation_dir", help="path to evaluation log directory", default="eval_log", type=str)
parser.add_argument("--save_freq", help="number of steps before saving a checkpoint", default=4096 * 8, type=int)
parser.add_argument("--eval_freq", help="number of steps before running an evaluation", default=4096 * 8, type=int)
parser.add_argument("--step_freq", help="number of steps before executing a PPO update", default=10240, type=int)
parser.add_argument(
"--rand_freq", help="number of environment resets before domain randomization", default=1, type=int
)
parser.add_argument(
"--total_steps",
help="the total number of steps before exiting and saving a final checkpoint",
default=250000000,
type=int,
)
parser.add_argument(
"--experimentFile", help="specify configuration via JSON. Overrides commandline", default="", type=str
)
args = parser.parse_args()
if args.experimentFile != "":
args_dict = vars(args)
if os.path.exists(args.experimentFile):
with open(args.experimentFile) as f:
json_args_dict = json.load(f)
args_dict.update(json_args_dict)
args = Namespace(**args_dict)
print("running with args: ", args)
def handle_exit(*args, **kwargs):
print("Exiting training...")
quit()
signal.signal(signal.SIGINT, handle_exit)
if args.eval:
runEval(args)
else:
train(args)
| 5,897 | Python | 31.766666 | 119 | 0.652535 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
from pxr import UsdGeom, Gf, UsdPhysics
import numpy as np
# Camera parameters
FOCAL_LENGTH = 0.75
HORIZONTAL_APERTURE = 2.350
VERTICAL_APERTURE = 2.350
# Drive Parameters
DRIVE_STIFFNESS = 10000.0
# The amount the camera points down at, decrease to raise camera angle
CAMERA_PIVOT = 40.0
class Jetbot:
def __init__(self, omni_kit):
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.usd_path = nucleus_server + "/Isaac/Robots/Jetbot/jetbot.usd"
self.robot_prim = None
self._dynamic_control = _dynamic_control
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.ar = None
# rotation is in degrees
def spawn(self, location, rotation):
stage = self.omni_kit.get_stage()
prefix = "/World/Robot/Jetbot"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
self.robot_prim = stage.DefinePrim(prim_path, "Xform")
self.robot_prim.GetReferences().AddReference(self.usd_path)
xform = UsdGeom.Xformable(self.robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(location)
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
xform_op.Set(mat)
self.camera_path = prim_path + "/chassis/rgb_camera/jetbot_camera"
self.camera_pivot = prim_path + "/chassis/rgb_camera"
# Set joint drive parameters
left_wheel_joint = UsdPhysics.DriveAPI.Apply(
stage.GetPrimAtPath(f"{prim_path}/chassis/left_wheel_joint"), "angular"
)
left_wheel_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
right_wheel_joint = UsdPhysics.DriveAPI.Apply(
stage.GetPrimAtPath(f"{prim_path}/chassis/right_wheel_joint"), "angular"
)
right_wheel_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
def teleport(self, location, rotation, settle=False):
if self.ar is None:
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
self.chassis = self.dc.get_articulation_root_body(self.ar)
self.dc.wake_up_articulation(self.ar)
rot_quat = Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation).GetQuaternion()
tf = self._dynamic_control.Transform(
location,
(rot_quat.GetImaginary()[0], rot_quat.GetImaginary()[1], rot_quat.GetImaginary()[2], rot_quat.GetReal()),
)
self.dc.set_rigid_body_pose(self.chassis, tf)
self.dc.set_rigid_body_linear_velocity(self.chassis, [0, 0, 0])
self.dc.set_rigid_body_angular_velocity(self.chassis, [0, 0, 0])
self.command((0, 0))
# Settle the robot onto the ground
if settle:
frame = 0
velocity = 1
while velocity > 0.1 and frame < 120:
self.omni_kit.update(1.0 / 60.0)
lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
velocity = np.linalg.norm([lin_vel.x, lin_vel.y, lin_vel.z])
frame = frame + 1
def activate_camera(self):
# Set camera parameters
stage = self.omni_kit.get_stage()
cameraPrim = UsdGeom.Camera(stage.GetPrimAtPath(self.camera_path))
cameraPrim.GetFocalLengthAttr().Set(FOCAL_LENGTH)
cameraPrim.GetHorizontalApertureAttr().Set(HORIZONTAL_APERTURE)
cameraPrim.GetVerticalApertureAttr().Set(VERTICAL_APERTURE)
# Point camera down at road
pivot_prim = stage.GetPrimAtPath(self.camera_pivot)
transform_attr = pivot_prim.GetAttribute("xformOp:transform")
transform_attr.Set(
transform_attr.Get().SetRotateOnly(Gf.Matrix3d(Gf.Rotation(Gf.Vec3d(0, 1, 0), CAMERA_PIVOT)))
)
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera_path))
def command(self, motor_value):
if self.ar is None:
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
self.chassis = self.dc.get_articulation_root_body(self.ar)
self.wheel_left = self.dc.find_articulation_dof(self.ar, "left_wheel_joint")
self.wheel_right = self.dc.find_articulation_dof(self.ar, "right_wheel_joint")
self.dc.wake_up_articulation(self.ar)
left_speed = self.wheel_speed_from_motor_value(motor_value[0])
right_speed = self.wheel_speed_from_motor_value(motor_value[1])
self.dc.set_dof_velocity_target(self.wheel_left, np.clip(left_speed, -10, 10))
self.dc.set_dof_velocity_target(self.wheel_right, np.clip(right_speed, -10, 10))
# idealized motor model
def wheel_speed_from_motor_value(self, input):
return input
def observations(self):
if self.ar is None:
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
self.chassis = self.dc.get_articulation_root_body(self.ar)
dc_pose = self.dc.get_rigid_body_pose(self.chassis)
dc_lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
dc_local_lin_vel = self.dc.get_rigid_body_local_linear_velocity(self.chassis)
dc_ang_vel = self.dc.get_rigid_body_angular_velocity(self.chassis)
return {
"pose": (dc_pose.p.x, dc_pose.p.y, dc_pose.p.z, dc_pose.r.w, dc_pose.r.x, dc_pose.r.y, dc_pose.r.z),
"linear_velocity": (dc_lin_vel.x, dc_lin_vel.y, dc_lin_vel.z),
"local_linear_velocity": (dc_local_lin_vel.x, dc_local_lin_vel.y, dc_local_lin_vel.z),
"angular_velocity": (dc_ang_vel.x, dc_ang_vel.y, dc_ang_vel.z),
}
| 6,442 | Python | 44.373239 | 117 | 0.652903 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_model.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import torch.nn.functional as F
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomCNN(BaseFeaturesExtractor):
"""
:param observation_space: (gym.Space)
:param features_dim: (int) Number of features extracted.
This corresponds to the number of units in the last layer.
"""
def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512):
super(CustomCNN, self).__init__(observation_space, features_dim)
# We assume CxHxW images (channels first)
# Re-ordering will be done by preprocessing or a wrapper
n_input_channels = observation_space.shape[0]
# print(observation_space.shape)
self.cnn = nn.Sequential(
nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Flatten(),
)
# Compute shape by doing one forward pass
with torch.no_grad():
n_flatten = self.cnn(torch.as_tensor(observation_space.sample()[None]).float()).shape[1]
print("POST CONV FEATURES = ", n_flatten)
# define the hidden layer to translate to a fixed number of features
self.linear = nn.Sequential(nn.Linear(n_flatten, features_dim), nn.ReLU())
def forward(self, observations: torch.Tensor) -> torch.Tensor:
return self.linear(self.cnn(observations))
| 2,166 | Python | 37.696428 | 100 | 0.669898 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_env.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import numpy as np
import carb
from pxr import UsdGeom, Gf
import os
import time
import atexit
import asyncio
import numpy as np
import random
import collections
import matplotlib.pyplot as plt
import omni
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.python_app import OmniKitHelper
from omni.isaac.synthetic_utils import SyntheticDataHelper
from jetbot import Jetbot
from road_environment import Environment
import gym
from gym import spaces
class JetbotEnv:
metadata = {"render.modes": ["human"]}
def __init__(
self, omni_kit, z_height=0, max_resets=10, updates_per_step=3, steps_per_rollout=1000, mirror_mode=False
):
self.MIRROR_MODE = mirror_mode
self.action_space = spaces.Box(low=0, high=2.0, shape=(2,), dtype=np.float32)
# IMPORTANT NOTE! SB3 wraps all image spaces in a transposer.
# it assumes the image that is output is of standard form
self.observation_space = spaces.Box(low=0, high=255, shape=(224, 224, 1), dtype=np.uint8)
self.noise = 0.05
# every time we update the stage, this is how much time will be simulated
self.dt = 1 / 30.0
self.omniverse_kit = omni_kit
self.sd_helper = SyntheticDataHelper()
self.roads = Environment(self.omniverse_kit)
# make environment z up
self.omniverse_kit.set_up_axis(UsdGeom.Tokens.z)
# we are going to train on a randomized loop that fits in a 2x2 tile area.
self.shape = [2, 2]
self.roads.generate_road(self.shape)
self.roads.generate_lights()
# spawn robot
self.jetbot = Jetbot(self.omniverse_kit)
self.initial_loc = self.roads.get_valid_location()
self.jetbot.spawn(Gf.Vec3d(self.initial_loc[0], self.initial_loc[1], 5), 0)
# switch kit camera to jetbot camera
self.jetbot.activate_camera()
# start simulation
self.omniverse_kit.play()
# Step simulation so that objects fall to rest
# wait until all materials are loaded
frame = 0
print("simulating physics...")
while frame < 60 or self.omniverse_kit.is_loading():
self.omniverse_kit.update(self.dt)
frame = frame + 1
print("done after frame: ", frame)
self.initialized = False
self.numsteps = 0
self.numresets = 0
self.maxresets = max_resets
self.updates_per_step = updates_per_step
self.steps_per_rollout = steps_per_rollout
self.hist_length = collections.deque([0.0] * 10, maxlen=10)
self.hist_forward_vel = collections.deque([0.0] * 10, maxlen=10)
self.hist_ang_vel = collections.deque([0.0] * 30, maxlen=30)
self.avg_forward_vel = 0
self.dist_traveled = 0
self.total_reward = 0
# Randomly mirror horizontally
self.update_mirror_mode()
def update_mirror_mode(self):
# Mirror if mode is enabled and we randomly sample True
self.mirror_mode = self.MIRROR_MODE & random.choice([False, True])
def calculate_reward(self):
# distance to nearest point on path in units of block. [0,1]
dist = self.roads.distance_to_path_in_tiles(self.current_pose)
self.dist = dist
dist_reward = np.exp(-dist ** 2 / 0.15 ** 2)
reward = self.current_forward_velocity * dist_reward
# if we are driving backwards, large negative reward
# if self.current_forward_velocity < 0:
# reward = self.current_forward_velocity
# THIS IS FOR DEBUGGING ONLY
if self.numsteps % 10 == 0 or reward < 0:
print(
"{:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}".format(
reward,
self.current_forward_velocity,
self.avg_forward_vel,
dist_reward,
self.dist_traveled,
self.current_ang_vel,
)
)
self.total_reward += reward
return reward
def is_dead(self):
done = False
# terminate if we leave boundary
if not self.roads.is_inside_path_boundary(self.current_pose):
print("dead not inside boundary", self.numsteps)
done = True
# kill the episode after steps_per_rollout steps
if self.numsteps > self.steps_per_rollout:
print("dead self.numsteps > self.steps_per_rollout", self.numsteps)
done = True
# terminate if we are driving backwards for too long
if self.avg_forward_vel <= 0 and self.numsteps > 35:
print("dead self.avg_forward_vel <= 1 after 35 steps ", self.avg_forward_vel)
done = True
return done
def transform_action(self, action):
# If mirrored, swap wheel controls
if self.mirror_mode:
action = action[::-1]
return action
def transform_state_image(self, im):
# If enabled, mirror image horizontally
if self.mirror_mode:
return np.flip(im, axis=1)
return im
def step(self, action):
if self.initialized:
self.previous_loc = self.current_loc
transformed_action = self.transform_action(action)
self.jetbot.command(transformed_action)
frame = 0
reward = 0
# every time step() is called we actually update the scene updates_per_step times.
while frame < self.updates_per_step:
# render at 1/30, simulate at 1/60, which means 2 substeps per frame
self.omniverse_kit.update(self.dt, 1.0 / 60.0, 2.0)
frame = frame + 1
# compute reward once simulation is complete
obs = self.jetbot.observations()
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
self.current_ang_vel = obs["angular_velocity"][2]
self.current_loc = self.roads.get_tile_from_pose(self.current_pose)
self.hist_forward_vel.append(self.current_forward_velocity)
self.dist_traveled = self.dist_traveled + self.current_forward_velocity * self.dt
self.hist_ang_vel.append(self.current_ang_vel)
self.avg_forward_vel = sum(self.hist_forward_vel) / len(self.hist_forward_vel)
if not self.initialized:
self.previous_loc = self.roads.get_tile_from_pose(self.current_pose)
reward = self.calculate_reward()
# the synthetic data helper is our way of grabbing the image data we need from the camera. currently the SD helper
# only supports a single camera, however you can use it to access camera data as a cuda tensor directly on the
# device. stable baselines 3 is expecting a numpy array, so we pull the data to the host
# additional sensors that could be of interest and can be added to this list:
# "depth", "instanceSegmentation", "semanticSegmentation"
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
# we only need the rgb channels of the rgb image
currentState = gt["rgb"][:, :, :3].astype(np.float)
currentState = self.transform_state_image(currentState)
if not self.initialized:
self.previousState = currentState
img = np.dot(currentState, [0.299, 0.587, 0.114]) # np.concatenate((currentState, self.previousState), axis=2)
img = img.reshape((img.shape[0], img.shape[1], 1))
# the real camera will have noise on each pixel, so we add some Gaussian noise here to simulate that
# comment out the line below to train on noise-free images
img = np.clip((255 * self.noise * np.random.randn(224, 224, 1) + img.astype(np.float)), 0, 255).astype(np.uint8)
self.previousState = currentState
self.numsteps += 1
done = self.is_dead()
return img, reward, done, {}
def reset(self):
# Randomly mirror horizontally
self.update_mirror_mode()
# randomize the road configuration every self.maxresets resets.
if self.numresets % self.maxresets == 0:
size = random.randrange(2, 6)
self.shape = [size, size]
self.roads.reset(self.shape)
if not self.initialized:
state, reward, done, info, = self.step([0, 0])
self.initialized = True
# every time we reset, we move the robot to a random location, and pointing along the direction of the road
loc = self.roads.get_valid_location()
# the random angle offset can be increased here
rot = self.roads.get_forward_direction(loc) + random.uniform(-10, 10)
self.jetbot.teleport(
Gf.Vec3d(loc[0] + random.uniform(-2.5, 2.5), loc[1] + random.uniform(-2.5, 2.5), 5), rot, settle=True
)
obs = self.jetbot.observations()
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
self.current_loc = self.roads.get_tile_from_pose(self.current_pose)
self.previous_loc = self.roads.get_tile_from_pose(self.current_pose)
self.dist = self.roads.distance_to_path_in_tiles(self.current_pose)
# wait for loading
if self.numresets % self.maxresets == 0:
while self.omniverse_kit.is_loading():
self.omniverse_kit.update(self.dt)
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
currentState = gt["rgb"][:, :, :3]
currentState = self.transform_state_image(currentState)
img = np.dot(
currentState.astype(np.float), [0.299, 0.587, 0.114]
) # np.concatenate((currentState, currentState), axis=2)
img = img.reshape((img.shape[0], img.shape[1], 1))
# comment out the line below to train on noise-free images
img = np.clip((255 * self.noise * np.random.randn(224, 224, 1) + img.astype(np.float)), 0, 255).astype(np.uint8)
print(
"reset ",
sum(self.hist_length) / len(self.hist_length),
self.numresets,
self.dist_traveled,
self.avg_forward_vel,
self.total_reward,
)
self.numsteps = 0
self.previousState = currentState
self.numresets += 1
self.total_reward = 0
self.dist_traveled = 0
return img
| 11,046 | Python | 37.093103 | 123 | 0.62457 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/road_environment.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
import numpy as np
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from jetbot_city.road_map import *
from jetbot_city.road_map_path_helper import *
from jetbot_city.road_map_generator import *
from omni.isaac.synthetic_utils import DomainRandomization
import math
import random  # used by get_forward_direction (previously supplied implicitly via the star imports above)
class Environment:
def __init__(self, omni_kit, z_height=0):
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error(
"Could not find nucleus server with /Isaac folder. Please specify the correct nucleus server in apps/omni.isaac.sim.python.kit"
)
return
result, nucleus_server = find_nucleus_server("/Library/Props/Road_Tiles/Parts/")
if result is False:
carb.log_error(
"Could not find nucleus server with /Library/Props/Road_Tiles/Parts/ folder. Please refer to the documentation to aquire the road tile assets"
)
return
# 1=I 2=L 3=T, 4=X
self.tile_usd = {
0: None,
1: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4336p01.usd", "offset": 180},
2: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4342p01.usd", "offset": 180},
3: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4341p01.usd", "offset": 180},
4: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4343p01.usd", "offset": 180},
} # list of tiles that can be spawned
self.texture_list = [
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/marble_tile.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_a.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_b.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/textured_wall.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered_color.png",
]
self.tile_size = [25.0, 25.0]
# 1=UP, 2 = DOWN, 3 = LEFT, 4= RIGHT
self.direction_map = {1: 180, 2: 0, 3: -90, 4: 90}
self.prims = [] # list of spawned tiles
self.height = z_height # height of the ground tiles
self.tiles = None
self.state = None
# because the ground plane is what the robot drives on, we only do this once. We can then re-generate the road as often as we need without impacting physics
self.setup_physics()
self.road_map = None
self.road_path_helper = None
self.map_generator = LoopRoadMapGenerator()
contents = omni.client.list(nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/")[1]
for entry in contents:
self.texture_list.append(
nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/" + entry.relative_path
)
contents = omni.client.list(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/")[1]
names = []
loaded_paths = []
for entry in contents:
if not entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
names.append(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/" + entry.relative_path)
loaded_paths.append("/DR/mesh_component/mesh_" + entry.relative_path[0:-4])
print(loaded_paths)
self.omni_kit.create_prim("/World/Floor", "Xform")
stage = omni.usd.get_context().get_stage()
cubeGeom = UsdGeom.Cube.Define(stage, "/World/Floor/thefloor")
cubeGeom.CreateSizeAttr(300)
offset = Gf.Vec3f(75, 75, -150.1)
cubeGeom.AddTranslateOp().Set(offset)
# Create a sphere room so the world is not black
self.omni_kit.create_prim("/World/Room", "Sphere", attributes={"radius": 1e3})
prims = []
self.dr = DomainRandomization()
self.dr.toggle_manual_mode()
self.dr.create_mesh_comp(prim_paths=prims, mesh_list=names, mesh_range=[1, 1])
self.omni_kit.update(1 / 60.0)
print("waiting for materials to load...")
while self.omni_kit.is_loading():
self.omni_kit.update(1 / 60.0)
lights = []
for i in range(5):
prim_path = "/World/Lights/light_" + str(i)
self.omni_kit.create_prim(
prim_path,
"SphereLight",
translation=(0, 0, 200),
rotation=(0, 0, 0),
attributes={"radius": 10, "intensity": 1000.0, "color": (1.0, 1.0, 1.0)},
)
lights.append(prim_path)
frames = 1
# enable randomization for environment
self.dr.create_movement_comp(prim_paths=loaded_paths, min_range=(0, 0, 15), max_range=(150, 150, 15))
self.dr.create_rotation_comp(prim_paths=loaded_paths)
self.dr.create_visibility_comp(prim_paths=loaded_paths, num_visible_range=(15, 15))
self.dr.create_light_comp(light_paths=lights)
self.dr.create_movement_comp(prim_paths=lights, min_range=(0, 0, 30), max_range=(150, 150, 30))
self.dr.create_texture_comp(
prim_paths=["/World/Floor"], enable_project_uvw=True, texture_list=self.texture_list
)
self.dr.create_color_comp(prim_paths=["/World/Room"])
def generate_lights(self):
prim_path = omni.usd.get_stage_next_free_path(self.omni_kit.get_stage(), "/World/Env/Light", False)
self.prims.append(prim_path)
self.omni_kit.create_prim(
prim_path,
"RectLight",
translation=(75, 75, 100),
rotation=(0, 0, 0),
attributes={"height": 150, "width": 150, "intensity": 2000.0, "color": (1.0, 1.0, 1.0)},
)
def reset(self, shape):
# print(self.prims)
# cmd = omni.kit.builtin.init.DeletePrimsCommand(self.prims)
# cmd.do()
stage = omni.usd.get_context().get_stage()
for layer in stage.GetLayerStack():
edit = Sdf.BatchNamespaceEdit()
for path in self.prims:
prim_spec = layer.GetPrimAtPath(path)
if prim_spec is None:
continue
parent_spec = prim_spec.realNameParent
if parent_spec is not None:
edit.Add(path, Sdf.Path.emptyPath)
layer.Apply(edit)
self.prims = []
self.generate_road(shape)
self.dr.randomize_once()
def generate_road(self, shape):
self.tiles, self.state, self.road_map = self.map_generator.generate(shape)
tiles = self.tiles
state = self.state
self.road_path_helper = RoadMapPathHelper(self.road_map)
if tiles.shape != state.shape:
print("tiles and state sizes don't match")
return
stage = self.omni_kit.get_stage()
rows, cols = tiles.shape
self.valid_tiles = []
for x in range(0, rows):
for y in range(0, cols):
if tiles[x, y] != 0:
pos_x = x * self.tile_size[0] + 12.5
pos_y = y * self.tile_size[1] + 12.5
self.create_tile(
stage,
self.tile_usd[tiles[x, y]]["asset"],
Gf.Vec3d(pos_x, pos_y, self.height),
self.direction_map[state[x, y]] + self.tile_usd[tiles[x, y]]["offset"],
)
for x in range(0, rows):
for y in range(0, cols):
# print(paths[x,y])
if tiles[x, y] != 0:
self.valid_tiles.append([x, y])
def generate_road_from_numpy(self, tiles, state):
self.tiles = tiles
self.state = state
self.road_map = RoadMap.create_from_numpy(self.tiles, self.state)
self.road_path_helper = RoadMapPathHelper(self.road_map)
if tiles.shape != state.shape:
print("tiles and state sizes don't match")
return
stage = self.omni_kit.get_stage()
rows, cols = tiles.shape
self.valid_tiles = []
for x in range(0, rows):
for y in range(0, cols):
if tiles[x, y] != 0:
pos_x = x * self.tile_size[0] + 12.5
pos_y = y * self.tile_size[1] + 12.5
self.create_tile(
stage,
self.tile_usd[tiles[x, y]]["asset"],
Gf.Vec3d(pos_x, pos_y, self.height),
self.direction_map[state[x, y]] + self.tile_usd[tiles[x, y]]["offset"],
)
for x in range(0, rows):
for y in range(0, cols):
# print(paths[x,y])
if tiles[x, y] != 0:
self.valid_tiles.append([x, y])
def create_tile(self, stage, path, location, rotation):
prefix = "/World/Env/Tiles/Tile"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
self.prims.append(prim_path)
tile_prim = stage.DefinePrim(prim_path, "Xform")
tile_prim.GetReferences().AddReference(path)
xform = UsdGeom.Xformable(tile_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(location)
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
xform_op.Set(mat)
def setup_physics(self):
from pxr import PhysxSchema, PhysicsSchemaTools
stage = self.omni_kit.get_stage()
# Add physics scene
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/World/Env/PhysicsScene"))
# Set gravity vector
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/World/Env/PhysicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/World/Env/PhysicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Create physics plane for the ground
PhysicsSchemaTools.addGroundPlane(
stage, "/World/Env/GroundPlane", "Z", 100.0, Gf.Vec3f(0, 0, self.height), Gf.Vec3f(1.0)
)
# Hide the visual geometry
imageable = UsdGeom.Imageable(stage.GetPrimAtPath("/World/Env/GroundPlane/geom"))
if imageable:
imageable.MakeInvisible()
def get_valid_location(self):
if self.tiles is None:
print("cannot provide valid location until road is generated")
return (0, 0)
i = np.random.choice(len(self.valid_tiles), 1)[0]
dist, point = self.road_path_helper.distance_to_path(self.valid_tiles[i])
x, y = point
print("get valid location called", self.valid_tiles[i], point)
return (x * self.tile_size[0], y * self.tile_size[1])
# Computes an approximate forward vector based on the current spawn point and nearby valid path point
def get_forward_direction(self, loc):
if self.road_path_helper is not None:
k = 100
dists, pts = self.road_path_helper.get_k_nearest_path_points(np.array([self.get_tile_from_pose(loc)]), k)
pointa = pts[0][0]
pointb = pts[0][k - 1]
if random.choice([False, True]):
pointa, pointb = pointb, pointa
return math.degrees(math.atan2(pointb[1] - pointa[1], pointb[0] - pointa[0]))
# Compute the x,y tile location from the robot pose
def get_tile_from_pose(self, pose):
return (pose[0] / self.tile_size[0], pose[1] / self.tile_size[1])
def distance_to_path(self, robot_pose):
if self.road_path_helper is not None:
distance, point = self.road_path_helper.distance_to_path(self.get_tile_from_pose(robot_pose))
return distance * self.tile_size[0]
def distance_to_path_in_tiles(self, robot_pose):
if self.road_path_helper is not None:
distance, point = self.road_path_helper.distance_to_path(self.get_tile_from_pose(robot_pose))
return distance
def distance_to_boundary(self, robot_pose):
if self.road_path_helper is not None:
distance = self.road_path_helper.distance_to_boundary(self.get_tile_from_pose(robot_pose))
return distance * self.tile_size[0]
def is_inside_path_boundary(self, robot_pose):
if self.road_path_helper is not None:
return self.road_path_helper.is_inside_path_boundary(self.get_tile_from_pose(robot_pose))
| 13,501 | Python | 42.13738 | 164 | 0.595289 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/__init__.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
| 431 | Python | 52.999993 | 76 | 0.812065 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/road_map.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import enum
import random
from collections import deque
import numpy as np
import os
import io
import cv2
import PIL.Image
import pickle
from typing import List, Set, Dict, Tuple, Optional
from .priority_queue import *
DEFAULT_IMAGE_SIZE = (32, 32)
def mask_L(size=256, thickness=1):
mask = np.zeros((size, size), dtype=np.uint8)
cv2.circle(mask, (size, 0), size // 2, (255, 255, 255), thickness)
return PIL.Image.fromarray(mask)
def mask_I(size=256, thickness=1):
mask = np.zeros((size, size), dtype=np.uint8)
cv2.line(mask, (size // 2, 0), (size // 2, size), (255, 255, 255), thickness, cv2.LINE_4)
return PIL.Image.fromarray(mask)
def mask_T(size=256, thickness=1):
mask = np.zeros((size, size), dtype=np.uint8)
mask = np.maximum(mask, cv2.circle(mask, (0, size), size // 2, (255, 255, 255), thickness))
mask = np.maximum(mask, cv2.circle(mask, (size, size), size // 2, (255, 255, 255), thickness))
mask = np.maximum(mask, cv2.line(mask, (0, size // 2), (size, size // 2), (255, 255, 255), thickness, cv2.LINE_4))
return PIL.Image.fromarray(mask)
def mask_X(size=256, thickness=1):
mask = mask_L(size, thickness)
mask = np.maximum(mask, mask_I(size, thickness))
for i in range(4):
mask = np.maximum(mask, cv2.rotate(mask, cv2.ROTATE_90_CLOCKWISE))
return PIL.Image.fromarray(mask)
_I_IMAGE = mask_I()
_L_IMAGE = mask_L()
_T_IMAGE = mask_T()
_X_IMAGE = mask_X()
class RoadBlockType(enum.IntEnum):
EMPTY = 0
I = 1
L = 2
T = 3
X = 4
def ports(self):
if self == RoadBlockType.I:
return [0, 1, 0, 1] # left, top, right, bottom
elif self == RoadBlockType.L:
return [0, 1, 1, 0]
elif self == RoadBlockType.T:
return [1, 0, 1, 1]
elif self == RoadBlockType.X:
return [1, 1, 1, 1]
else:
return [0, 0, 0, 0]
def image(self, size=DEFAULT_IMAGE_SIZE):
if self == RoadBlockType.I:
return _I_IMAGE.resize(size)
elif self == RoadBlockType.L:
return _L_IMAGE.resize(size)
elif self == RoadBlockType.T:
return _T_IMAGE.resize(size)
elif self == RoadBlockType.X:
return _X_IMAGE.resize(size)
else:
return PIL.Image.fromarray(np.zeros(size + (3,), dtype=np.uint8))
def paths_mask(self, size=DEFAULT_IMAGE_SIZE, thickness=1):
if self == RoadBlockType.I:
return mask_I(size[0], thickness)
elif self == RoadBlockType.L:
return mask_L(size[0], thickness)
elif self == RoadBlockType.T:
return mask_T(size[0], thickness)
elif self == RoadBlockType.X:
return mask_X(size[0], thickness)
else:
return PIL.Image.fromarray(np.zeros(size, dtype=np.uint8))
class RoadBlockState(enum.IntEnum):
HIDDEN = 0
UP = 1 # 0
DOWN = 2 # 180
LEFT = 3 # CCW 90
RIGHT = 4 # CW 90
@staticmethod
def random():
return RoadBlockState(np.random.randint(len(RoadBlockState)))
class RoadBlock(object):
def __init__(self, type: RoadBlockType, state: RoadBlockState):
self.type = type
self.state = state
def __iter__(self):
yield self.type
yield self.state
def ports(self):
if self.state == RoadBlockState.HIDDEN:
return [0, 0, 0, 0]
elif self.state == RoadBlockState.UP:
return self.type.ports()
elif self.state == RoadBlockState.DOWN:
return list(np.roll(self.type.ports(), 2))
elif self.state == RoadBlockState.LEFT:
return list(np.roll(self.type.ports(), -1))
else:
return list(np.roll(self.type.ports(), 1))
def has_left_port(self):
return self.ports()[0]
def has_right_port(self):
return self.ports()[2]
def has_top_port(self):
return self.ports()[1]
def has_bottom_port(self):
return self.ports()[3]
def image(self, size=DEFAULT_IMAGE_SIZE):
# if self.state == RoadBlockState.HIDDEN or self.type == RoadBlockType.EMPTY:
# return PIL.Image.fromarray(np.zeros(size + (3,), dtype=np.uint8))
image = self.type.image(size=size)
if self.state == RoadBlockState.LEFT:
image = image.rotate(90)
elif self.state == RoadBlockState.RIGHT:
image = image.rotate(-90)
elif self.state == RoadBlockState.DOWN:
image = image.rotate(180)
return image
def paths_mask(self, size=DEFAULT_IMAGE_SIZE, thickness=1):
# if self.state == RoadBlockState.HIDDEN or self.type == RoadBlockType.EMPTY:
# return PIL.Image.fromarray(np.zeros(size, dtype=np.uint8))
image = self.type.paths_mask(size=size, thickness=thickness)
if self.state == RoadBlockState.LEFT:
image = image.rotate(90)
elif self.state == RoadBlockState.RIGHT:
image = image.rotate(-90)
elif self.state == RoadBlockState.DOWN:
image = image.rotate(180)
return image
def l1_distance(a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
class RoadLocation(object):
def __init__(self, i, j):
self.i = i
self.j = j
def __iter__(self):
yield self.i
yield self.j
class RoadMap(object):
def __init__(self, grid: List[List[RoadBlock]]):
self.grid = grid
@staticmethod
def create_random_from_types(types: List[RoadBlockType], NI, NJ):
grid = []
for i in range(NI):
row = []
for j in range(NJ):
row.append(RoadBlock(RoadBlockType.EMPTY, RoadBlockState.random()))
grid.append(row)
# construct positions
locations = []
for i in range(NI):
for j in range(NJ):
locations.append(RoadLocation(i, j))
np.random.shuffle(locations)
locations = locations[0 : len(types)]
for i, loc in enumerate(locations):
grid[loc.i][loc.j] = RoadBlock(types[i], RoadBlockState.random())
return RoadMap(grid)
@staticmethod
def create_from_numpy(types, states):
grid = []
for i in range(types.shape[0]):
row = []
for j in range(types.shape[1]):
row.append(RoadBlock(RoadBlockType(types[i, j]), RoadBlockState(states[i, j])))
grid.append(row)
return RoadMap(grid)
@property
def NI(self):
return len(self.grid)
@property
def NJ(self):
return len(self.grid[0])
def numpy(self):
types = []
states = []
for i in range(self.NI):
types_i = []
states_i = []
for j in range(self.NJ):
types_i.append(int(self.grid[i][j].type))
states_i.append(int(self.grid[i][j].state))
types.append(types_i)
states.append(states_i)
return np.array(types), np.array(states)
def _children(self, i, j):
block = self.grid[i][j]
children = []
if i > 0:
top = self.grid[i - 1][j]
if top.has_bottom_port() and block.has_top_port():
children.append((i - 1, j))
if i < self.NI - 1:
bottom = self.grid[i + 1][j]
if bottom.has_top_port() and block.has_bottom_port():
children.append((i + 1, j))
if j > 0:
left = self.grid[i][j - 1]
if left.has_right_port() and block.has_left_port():
children.append((i, j - 1))
if j < self.NJ - 1:
right = self.grid[i][j + 1]
if right.has_left_port() and block.has_right_port():
children.append((i, j + 1))
return children
def _search_path(self, i, j, visited):
q = deque()
q.append((i, j))
path = []
while q:
i, j = q.popleft()
path.append((i, j))
for child in self._children(i, j):
if not visited[child[0], child[1]]:
q.append(child)
visited[child[0], child[1]] = True
return path
def find_shortest_path(self, a, b):
visited = np.zeros((self.NI, self.NJ), dtype=np.bool)
q = PriorityQueue()
q.push((l1_distance(a, b), [a]))
visited[a[0], a[1]] = 1
while not q.empty():
cost, path = q.pop()
tail = path[-1]
if tail[0] == b[0] and tail[1] == b[1]:
return path
for child in self._children(tail[0], tail[1]):
if not visited[child[0], child[1]]:
child_path = path + [child]
child_cost = len(child_path) + l1_distance(child, b)
q.push((child_cost, child_path))
visited[child[0], child[1]] = 1
return None
def paths(self):
visited = np.zeros((self.NI, self.NJ), dtype=np.bool)
# set blocks that cannot be path components as visited
for i in range(self.NI):
for j in range(self.NJ):
block = self.grid[i][j]
if block.state == RoadBlockState.HIDDEN or block.type == RoadBlockType.EMPTY:
visited[i, j] = True
paths = []
for i in range(self.NI):
for j in range(self.NJ):
if not visited[i, j]:
visited[i, j] = True
path = self._search_path(i, j, visited)
paths.append(path)
return paths
def num_open_ports(self):
num_open = 0
for i in range(self.NJ):
for j in range(self.NI):
num_open += np.count_nonzero(self.grid[i][j].ports()) - len(self._children(i, j))
return num_open
def num_ports(self):
num_ports = 0
for i in range(self.NJ):
for j in range(self.NI):
num_ports += np.count_nonzero(self.grid[i][j].ports()) # - len(self._children(i, j))
return num_ports
def num_closed_ports(self):
num_ports = 0
for i in range(self.NJ):
for j in range(self.NI):
num_ports += len(self._children(i, j))
return num_ports
def image(self, block_size=DEFAULT_IMAGE_SIZE):
si = block_size[0]
sj = block_size[1]
image = np.zeros((si * self.NI, sj * self.NJ, 3), dtype=np.uint8)
for i in range(self.NJ):
for j in range(self.NI):
image[i * si : i * si + si, j * sj : j * sj + sj] = np.array(self.grid[i][j].image(size=block_size))
return PIL.Image.fromarray(image)
def paths_mask(self, block_size=DEFAULT_IMAGE_SIZE, thickness=1):
si = block_size[0]
sj = block_size[1]
image = np.zeros((si * self.NI, sj * self.NJ), dtype=np.uint8)
for i in range(self.NJ):
for j in range(self.NI):
image[i * si : i * si + si, j * sj : j * sj + sj] = np.array(
self.grid[i][j].paths_mask(size=block_size, thickness=thickness)
)
return PIL.Image.fromarray(image)
def obs(self):
obs = np.zeros((4, self.NI, self.NJ), dtype=np.float32)
for i in range(self.NI):
for j in range(self.NJ):
obs[0, i, j] = self.grid[i][j].has_left_port()
obs[1, i, j] = self.grid[i][j].has_top_port()
obs[2, i, j] = self.grid[i][j].has_right_port()
obs[3, i, j] = self.grid[i][j].has_bottom_port()
return obs
def swap_(self, a, b):
tmp = self.grid[a[0]][a[1]]
self.grid[a[0]][a[1]] = self.grid[b[0]][b[1]]
self.grid[b[0]][b[1]] = tmp
def render(self, widget):
# Render the environment to the screen
imgByteArr = io.BytesIO()
self.image(block_size=(64, 64)).save(imgByteArr, format="PNG")
imgByteArr = imgByteArr.getvalue()
widget.value = imgByteArr
def save(self, f):
types, states = self.numpy()
data = {"types": types, "states": states}
if isinstance(f, str):
with open(f, "wb") as f:
pickle.dump(data, f)
else:
pickle.dump(data, f)
@staticmethod
def load(f):
if isinstance(f, str):
with open(f, "rb") as f:
data = pickle.load(f)
else:
data = pickle.load(f)
return RoadMap.create_from_numpy(data["types"], data["states"])
def ports(self):
ports = np.zeros((self.NI, self.NJ, 4), np.bool)
for i in range(self.NI):
for j in range(self.NJ):
ports[i, j, 0] = self.grid[i][j].has_left_port()
ports[i, j, 1] = self.grid[i][j].has_top_port()
ports[i, j, 2] = self.grid[i][j].has_right_port()
ports[i, j, 3] = self.grid[i][j].has_bottom_port()
return ports
@staticmethod
def create_from_ports(ports):
NI = ports.shape[0]
NJ = ports.shape[1]
types = np.zeros(ports.shape[0:2], dtype=np.int64)
states = np.zeros(ports.shape[0:2], dtype=np.int64)
for i in range(NI):
for j in range(NJ):
pij = ports[i, j]
for typ in RoadBlockType:
if (np.roll(typ.ports(), 0) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.UP
break
elif (np.roll(typ.ports(), 1) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.RIGHT
break
elif (np.roll(typ.ports(), 2) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.DOWN
break
elif (np.roll(typ.ports(), 3) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.LEFT
break
return RoadMap.create_from_numpy(types, states)
| 14,731 | Python | 31.449339 | 118 | 0.530242 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/road_map_generator.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .road_map import *
from .priority_queue import *
def children(occupancy, point):
NI = occupancy.shape[0]
NJ = occupancy.shape[1]
children = []
if point[0] > 0:
pt = [point[0] - 1, point[1]]
children.append(pt)
if point[0] < NI - 1:
pt = [point[0] + 1, point[1]]
children.append(pt)
if point[1] > 0:
pt = [point[0], point[1] - 1]
children.append(pt)
if point[1] < NJ - 1:
pt = [point[0], point[1] + 1]
children.append(pt)
return children
def l1_distance(a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
def find_path(occupancy, point_a, point_b):
visited = np.copy(occupancy)
    visited[point_a[0], point_a[1]] = 1
q = PriorityQueue() # cost heuristic, path...
for child in children(visited, point_a):
if not visited[child[0], child[1]]:
q.push((1 + l1_distance(child, point_b), [child]))
visited[child[0], child[1]] = 1
while not q.empty():
cost, path = q.pop()
tail = path[-1]
for child in children(visited, tail):
if child[0] == point_b[0] and child[1] == point_b[1]:
return path
elif not visited[child[0], child[1]]:
child_cost = len(path) + l1_distance(child, point_b)
child_path = path + [child]
q.push((child_cost, child_path))
visited[child[0], child[1]] = 1
return None
def add_port(ports, a, b):
# port order: left,top,right,bottom
if b[1] > a[1]:
# b to right of a
ports[a[0], a[1], 2] = 1
ports[b[0], b[1], 0] = 1
elif b[1] < a[1]:
# b to left of a
ports[a[0], a[1], 0] = 1
ports[b[0], b[1], 2] = 1
elif b[0] > a[0]:
# b above a
ports[a[0], a[1], 3] = 1
ports[b[0], b[1], 1] = 1
elif b[0] < a[0]:
# b below a
ports[a[0], a[1], 1] = 1
ports[b[0], b[1], 3] = 1
def ports_to_types_states(ports):
NI = ports.shape[0]
NJ = ports.shape[1]
types = np.zeros(ports.shape[0:2], dtype=np.int64)
states = np.zeros(ports.shape[0:2], dtype=np.int64)
for i in range(NI):
for j in range(NJ):
pij = ports[i, j]
for typ in RoadBlockType:
if (np.roll(typ.ports(), 0) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.UP
break
elif (np.roll(typ.ports(), 1) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.RIGHT
break
elif (np.roll(typ.ports(), 2) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.DOWN
break
elif (np.roll(typ.ports(), 3) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.LEFT
break
return types, states
class RoadMapGenerator(object):
def generate(self, shape):
raise NotImplementedError
class LoopRoadMapGenerator(RoadMapGenerator):
def generate(self, shape):
GRID_SIZE = shape
        ports = np.zeros((GRID_SIZE[0], GRID_SIZE[1], 4), bool)
occupancy = np.zeros(GRID_SIZE, np.uint8)
start = (np.random.randint(GRID_SIZE[0]), np.random.randint(GRID_SIZE[1]))
path = []
path.append(start)
occupancy[start[0], start[1]] = 1
runner = start
while True:
# get valid children
valid_children = []
for child in children(occupancy, runner):
if not occupancy[child[0], child[1]]:
child_occupancy = np.copy(occupancy)
child_occupancy[child[0], child[1]] = 1
child_path = find_path(child_occupancy, child, start)
if child_path is not None:
valid_children.append(child)
# exit if no valid child paths
if len(valid_children) == 0:
break
# navigate to random child
idx = np.random.randint(len(valid_children))
runner = valid_children[idx]
path.append(runner)
occupancy[runner[0], runner[1]] = 1
path = path + find_path(occupancy, runner, start) + [start]
for i in range(len(path) - 1):
add_port(ports, path[i], path[i + 1])
types, states = ports_to_types_states(ports)
road_map = RoadMap.create_from_numpy(types, states)
return types, states, road_map
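

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original sample); run
    # it as a module so the relative imports resolve. It generates a random
    # closed-loop course on a 6x6 grid and prints the resulting block layout.
    types, states, road_map = LoopRoadMapGenerator().generate((6, 6))
    print("block types:\n", types)
    print("block states:\n", states)
    print("port layout shape:", road_map.ports().shape)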
| 5,124 | Python | 29.505952 | 82 | 0.523224 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/priority_queue.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
class PriorityQueue(object):
def __init__(self):
self.items = []
def push(self, item):
a = 0
b = len(self.items) - 1
while a <= b:
c = a + (b - a) // 2
if self.items[c][0] < item[0]: # 0, 1 (0),
a = c + 1
elif self.items[c][0] > item[0]:
b = c - 1
else:
break
if a >= len(self.items):
idx = len(self.items)
elif b < 0:
idx = 0
        elif a <= b:
            # the loop exited via break: an item with equal priority sits at index c
            idx = c
        else:
            # binary search finished with a > b, so a is the correct insertion point
            idx = a
self.items.insert(idx, item)
def pop(self):
return self.items.pop(0)
def empty(self):
return len(self.items) == 0
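

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original sample):
    # items are (cost, payload) tuples kept sorted by cost, so the cheapest
    # item is popped first.
    q = PriorityQueue()
    q.push((3, "c"))
    q.push((1, "a"))
    q.push((2, "b"))
    assert q.pop() == (1, "a")
    assert q.pop() == (2, "b")
    assert q.pop() == (3, "c") and q.empty()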
| 1,137 | Python | 27.449999 | 76 | 0.550572 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/road_map_path_helper.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .road_map import *
from scipy.spatial import KDTree
import cv2
import matplotlib.pyplot as plt
import numpy as np
class RoadMapPathHelper(object):
def __init__(self, road_map, block_resolution=128, path_thickness_ratio=19 / 32):
self._road_map = road_map
self._block_resolution = block_resolution
self._map_path_mask = np.array(self._road_map.paths_mask((block_resolution, block_resolution), thickness=1))
self._map_boundary_mask = np.array(
self._road_map.paths_mask(
(block_resolution, block_resolution), thickness=int(block_resolution * path_thickness_ratio)
)
)
mask_pts = np.transpose(np.nonzero(self._map_path_mask))
mask_pts = mask_pts / block_resolution # get points in grid coordinates
self._path_kdtree = KDTree(mask_pts)
boundary_points = np.transpose(np.nonzero(cv2.Laplacian(self._map_boundary_mask, cv2.CV_32F)))
boundary_points = boundary_points / block_resolution
self._boundary_kdtree = KDTree(boundary_points)
# print("boundary points shape! ", boundary_points.shape)
# plt.imshow(self._map_boundary_mask)
# plt.show()
def get_k_nearest_path_points(self, points, k=1):
dists, indices = self._path_kdtree.query(points, k=k)
return dists, self._path_kdtree.data[indices]
def distance_to_path(self, point):
dists, pts = self.get_k_nearest_path_points(np.array([point]))
return (float(dists[0]), pts[0])
def get_k_nearest_boundary_points(self, points, k=1):
dists, indices = self._boundary_kdtree.query(points, k=k)
return dists, self._boundary_kdtree.data[indices]
def distance_to_boundary(self, point):
dists, pts = self.get_k_nearest_boundary_points(np.array([point]))
return float(dists[0])
def is_inside_path_boundary(self, point):
return (
self._map_boundary_mask[int(point[0] * self._block_resolution), int(point[1] * self._block_resolution)] > 0
)
| 2,485 | Python | 41.862068 | 119 | 0.675654 |
KazWong/omniverse_sample/ov_sample/python_samples/ros/clock.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Example ROS bridge sample showing rospy and rosclock interaction
kit = OmniKitHelper(config=CONFIG)
import omni
# enable ROS bridge extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
# check if rosmaster node is running
    # this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
kit.update()
result, check = omni.kit.commands.execute("RosBridgeRosMasterCheck")
if not check:
carb.log_error("Please run roscore before executing this script")
kit.stop()
kit.shutdown()
exit()
# Note that this is not the system level rospy, but one compiled for omniverse
from rosgraph_msgs.msg import Clock
import rospy
# create a clock using sim time
result, prim = omni.kit.commands.execute(
"ROSBridgeCreateClock", path="/ROS_Clock_Sim", clock_topic="/sim_time", sim_time=True
)
# create a clock using system time
result, prim = omni.kit.commands.execute(
"ROSBridgeCreateClock", path="/ROS_Clock_System", clock_topic="/system_time", sim_time=False
)
# create a clock which we will publish manually, set enabled to false to make it manually controlled
result, prim = omni.kit.commands.execute(
"ROSBridgeCreateClock", path="/ROS_Clock_Manual", clock_topic="/manual_time", sim_time=True, enabled=False
)
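    # With roscore running, the published clocks can be inspected from another
    # terminal (illustrative): rostopic echo /sim_time, /system_time or /manual_time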
kit.update()
kit.update()
# Define ROS callbacks
def sim_clock_callback(data):
print("sim time:", data.clock.to_sec())
def system_clock_callback(data):
print("system time:", data.clock.to_sec())
def manual_clock_callback(data):
print("manual stepped sim time:", data.clock.to_sec())
    # Create rospy node
rospy.init_node("isaac_sim_test_gripper", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
# create subscribers
sim_clock_sub = rospy.Subscriber("sim_time", Clock, sim_clock_callback)
system_clock_sub = rospy.Subscriber("system_time", Clock, system_clock_callback)
manual_clock_sub = rospy.Subscriber("manual_time", Clock, manual_clock_callback)
time.sleep(1.0)
# start simulation
kit.play()
# perform a fixed number of steps with fixed step size
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
result, status = omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock_Manual")
kit.update(1.0 / 60.0) # runs with a non-realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# perform a fixed number of steps with realtime clock
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
result, status = omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock_Manual")
kit.update() # runs with a realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# cleanup and shutdown
sim_clock_sub.unregister()
system_clock_sub.unregister()
manual_clock_sub.unregister()
kit.stop()
kit.shutdown()
| 4,151 | Python | 37.803738 | 114 | 0.687786 |
KazWong/omniverse_sample/ov_sample/python_samples/ros/carter_stereo.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": False,
}
if __name__ == "__main__":
# Example ROS bridge sample showing manual control over messages
kit = OmniKitHelper(config=CONFIG)
import omni
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from pxr import Sdf
# enable ROS bridge extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
# Locate /Isaac folder on nucleus server to load sample
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder, exiting")
exit()
usd_path = nucleus_server + "/Isaac/Samples/ROS/Scenario/carter_warehouse_navigation.usd"
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
# Disable all ROS components so we can demonstrate publishing manually
# Otherwise, if a component is enabled, it will publish every timestep
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path("/World/Carter_ROS/ROS_Camera_Stereo_Right.enabled"),
value=False,
prev=None,
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_Camera_Stereo_Left.enabled"), value=False, prev=None
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_Lidar.enabled"), value=False, prev=None
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_DifferentialBase.enabled"), value=False, prev=None
)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path("/World/Carter_ROS/ROS_Carter_Lidar_Broadcaster.enabled"),
value=False,
prev=None,
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_Carter_Broadcaster.enabled"), value=False, prev=None
)
omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/ROS_Clock.enabled"), value=False, prev=None)
kit.play()
kit.update(1.0 / 60.0)
# Tick all of the components once to make sure all of the ROS nodes are initialized
# For cameras this also handles viewport initialization etc.
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Right")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Left")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Lidar")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_DifferentialBase")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Lidar_Broadcaster")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Broadcaster")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/ROS_Clock")
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
kit.update(1.0 / 60.0)
# Dock the second camera window
right_viewport = omni.ui.Workspace.get_window("Viewport")
left_viewport = omni.ui.Workspace.get_window("Viewport_2")
if right_viewport is not None and left_viewport is not None:
left_viewport.dock_in(right_viewport, omni.ui.DockPosition.LEFT)
# Create a rostopic to publish message to spin robot in place
# Note that this is not the system level rospy, but one compiled for omniverse
from geometry_msgs.msg import Twist
import rospy
rospy.init_node("carter_stereo", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
pub = rospy.Publisher("cmd_vel", Twist, queue_size=10)
frame = 0
while kit.app.is_running():
# Run with a fixed step size
kit.update(1.0 / 60.0)
# Publish clock every frame
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/ROS_Clock")
# publish TF and Lidar every 2 frames
if frame % 2 == 0:
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Lidar")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_DifferentialBase")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Lidar_Broadcaster")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Broadcaster")
# because we only tick the differential base component every two frames, we can also publish the ROS message at the same rate
message = Twist()
message.angular.z = 0.2 # spin in place
pub.publish(message)
# Publish cameras every 60 frames or one second of simulation
if frame % 60 == 0:
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Right")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Left")
frame = frame + 1
pub.unregister()
rospy.signal_shutdown("carter_stereo complete")
kit.stop()
kit.shutdown()
| 6,107 | Python | 45.984615 | 137 | 0.700508 |
KazWong/omniverse_sample/ov_sample/python_samples/ros/moveit.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import carb
from omni.isaac.python_app import OmniKitHelper
FRANKA_STAGE_PATH = "/Franka"
FRANKA_USD_PATH = "/Isaac/Robots/Franka/franka_alt_fingers.usd"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Room/simple_room.usd"
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": False,
}
def wait_load_stage():
# Wait two frames so stage starts loading
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
if __name__ == "__main__":
# Example ROS bridge sample demonstrating the manual loading of stages
# and creation of ROS components
kit = OmniKitHelper(config=CONFIG)
import omni
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.isaac.utils.scripts.scene_utils import create_background
from pxr import Gf
# enable ROS bridge extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
# Locate /Isaac folder on nucleus server to load environment and robot stages
result, _nucleus_path = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder, exiting")
exit()
# Initialize extension and UI elements
_viewport = omni.kit.viewport.get_default_viewport_window()
_usd_context = omni.usd.get_context()
# Preparing stage
_viewport.set_camera_position("/OmniverseKit_Persp", 120, 120, 80, True)
_viewport.set_camera_target("/OmniverseKit_Persp", 0, 0, 50, True)
_stage = _usd_context.get_stage()
# Loading the simple_room environment
background_asset_path = _nucleus_path + BACKGROUND_USD_PATH
create_background(_stage, background_asset_path, background_path=BACKGROUND_STAGE_PATH, offset=Gf.Vec3d(0, 0, 0))
wait_load_stage()
# Loading the franka robot USD
franka_asset_path = _nucleus_path + FRANKA_USD_PATH
prim = _stage.DefinePrim(FRANKA_STAGE_PATH, "Xform")
prim.GetReferences().AddReference(franka_asset_path)
rot_mat = Gf.Matrix3d(Gf.Rotation((0, 0, 1), 90))
omni.kit.commands.execute(
"TransformPrimCommand",
path=prim.GetPath(),
old_transform_matrix=None,
new_transform_matrix=Gf.Matrix4d().SetRotate(rot_mat).SetTranslateOnly(Gf.Vec3d(0, -64, 0)),
)
wait_load_stage()
# Loading all ROS components initially as disabled so we can demonstrate publishing manually
# Otherwise, if a component is enabled, it will publish every timestep
# Load ROS Clock
omni.kit.commands.execute("ROSBridgeCreateClock", path="/ROS_Clock", enabled=False)
# Load Joint State
omni.kit.commands.execute(
"ROSBridgeCreateJointState", path="/ROS_JointState", articulation_prim_rel=[FRANKA_STAGE_PATH], enabled=False
)
# Load Pose Tree
omni.kit.commands.execute(
"ROSBridgeCreatePoseTree", path="/ROS_PoseTree", target_prims_rel=[FRANKA_STAGE_PATH], enabled=False
)
kit.play()
kit.update(1.0 / 60.0)
# Tick all of the components once to make sure all of the ROS nodes are initialized
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_JointState")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_PoseTree")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock")
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
kit.update(1.0 / 60.0)
kit.play()
while kit.app.is_running():
# Run with a fixed step size
kit.update(1.0 / 60.0)
# Publish clock, TF and JointState each frame
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_JointState")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_PoseTree")
kit.stop()
kit.shutdown()
| 4,589 | Python | 35.72 | 117 | 0.70146 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/asset_usd_converter.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import argparse
import asyncio
import omni
import os
from omni.isaac.python_app import OmniKitHelper
async def convert(in_file, out_file, load_materials=False):
# This import causes conflicts when global
import omni.kit.asset_converter
def progress_callback(progress, total_steps):
pass
converter_context = omni.kit.asset_converter.AssetConverterContext()
# setup converter and flags
converter_context.ignore_materials = not load_materials
# converter_context.ignore_animation = False
# converter_context.ignore_cameras = True
# converter_context.single_mesh = True
# converter_context.smooth_normals = True
# converter_context.preview_surface = False
# converter_context.support_point_instancer = False
# converter_context.embed_mdl_in_usd = False
# converter_context.use_meter_as_world_unit = True
# converter_context.create_world_as_default_root_prim = False
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(in_file, out_file, progress_callback, converter_context)
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break
return success
def asset_convert(args):
supported_file_formats = ["stl", "obj", "fbx"]
for folder in args.folders:
local_asset_output = folder + "_converted"
result = omni.client.create_folder(f"{local_asset_output}")
for folder in args.folders:
print(f"\nConverting folder {folder}...")
(result, models) = omni.client.list(folder)
for i, entry in enumerate(models):
if i >= args.max_models:
print(f"max models ({args.max_models}) reached, exiting conversion")
break
model = str(entry.relative_path)
model_name = os.path.splitext(model)[0]
model_format = (os.path.splitext(model)[1])[1:]
# Supported input file formats
if model_format in supported_file_formats:
input_model_path = folder + "/" + model
converted_model_path = folder + "_converted/" + model_name + "_" + model_format + ".usd"
if not os.path.exists(converted_model_path):
status = asyncio.get_event_loop().run_until_complete(
convert(input_model_path, converted_model_path, True)
)
if not status:
print(f"ERROR Status is {status}")
print(f"---Added {converted_model_path}")
if __name__ == "__main__":
CONFIG = {"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit'}
kit = OmniKitHelper(config=CONFIG)
parser = argparse.ArgumentParser("Convert OBJ/STL assets to USD")
parser.add_argument(
"--folders", type=str, nargs="+", default=None, help="List of folders to convert (space seperated)."
)
parser.add_argument(
"--max-models", type=int, default=50, help="If specified, convert up to `max-models` per folder."
)
parser.add_argument(
"--load-materials", action="store_true", help="If specified, materials will be loaded from meshes"
)
args = parser.parse_args()
if args.folders is None:
raise ValueError(f"No folders specified via --folders argument")
# Ensure Omniverse Kit is launched via OmniKitHelper before asset_convert() is called
asset_convert(args)
# cleanup
kit.shutdown()
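    # Example invocation (illustrative; folder paths are placeholders and the
    # script is launched through the Isaac Sim python environment):
    #   ./python.sh asset_usd_converter.py --folders ./assets/props ./assets/tools --max-models 20 --load-materials
    # Converted files are written next to each input folder in "<folder>_converted".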
| 3,993 | Python | 38.156862 | 108 | 0.65164 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/franka_articulation.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# This sample loads an articulation and prints its information
kit = OmniKitHelper(config=CONFIG)
import omni
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
stage = kit.get_stage()
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
asset_path = nucleus_server + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
omni.usd.get_context().open_stage(asset_path)
# start simulation
kit.play()
# perform timestep
kit.update(1.0 / 60.0)
dc = _dynamic_control.acquire_dynamic_control_interface()
# Get handle to articulation
art = dc.get_articulation("/panda")
if art == _dynamic_control.INVALID_HANDLE:
print("*** '%s' is not an articulation" % "/panda")
else:
# Print information about articulation
root = dc.get_articulation_root_body(art)
print(str("Got articulation handle %d \n" % art) + str("--- Hierarchy\n"))
body_states = dc.get_articulation_body_states(art, _dynamic_control.STATE_ALL)
print(str("--- Body states:\n") + str(body_states) + "\n")
dof_states = dc.get_articulation_dof_states(art, _dynamic_control.STATE_ALL)
print(str("--- DOF states:\n") + str(dof_states) + "\n")
dof_props = dc.get_articulation_dof_properties(art)
print(str("--- DOF properties:\n") + str(dof_props) + "\n")
# Simulate robot coming to a rest configuration
for i in range(100):
kit.update(1.0 / 60.0)
# Simulate robot for a fixed number of frames and specify a joint position target
for i in range(100):
dof_ptr = dc.find_articulation_dof(art, "panda_joint2")
# This should be called each frame of simulation if state on the articulation is being changed.
dc.wake_up_articulation(art)
# Set joint position target
dc.set_dof_position_target(dof_ptr, -1.5)
kit.update(1.0 / 60.0)
kit.stop()
kit.shutdown()
| 2,778 | Python | 37.068493 | 103 | 0.674946 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/control_your_robot.py | # https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/controlling_robot.html
# Minimal snippet: start the application and press "Play" on the timeline.
import os
from omni.isaac.python_app import OmniKitHelper
# The app must be started before other omni.* runtime modules are used
kit = OmniKitHelper(config={"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit'})
import omni
omni.timeline.get_timeline_interface().play()
kit.update(1.0 / 60.0)
kit.shutdown()
| 202 | Python | 24.374997 | 83 | 0.811881 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/load_stage.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import carb
import omni
# This sample loads a usd stage and starts simulation
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"sync_loads": True,
"headless": False,
"renderer": "RayTracedLighting",
}
if __name__ == "__main__":
import argparse
# Set up command line arguments
parser = argparse.ArgumentParser("Usd Load sample")
parser.add_argument("--usd_path", type=str, help="Path to usd file", required=True)
parser.add_argument("--headless", default=False, action="store_true", help="Run stage headless")
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
# Start the omniverse application
CONFIG["headless"] = args.headless
kit = OmniKitHelper(config=CONFIG)
# Locate /Isaac folder on nucleus server to load sample
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder, exiting")
exit()
asset_path = nucleus_server + "/Isaac"
usd_path = asset_path + args.usd_path
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
kit.play()
# Run in test mode, exit after a fixed number of steps
if args.test is True:
for i in range(10):
# Run in realtime mode, we don't specify the step size
kit.update()
else:
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
kit.update()
kit.stop()
kit.shutdown()
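    # Example invocation (illustrative; --usd_path is appended to the /Isaac
    # folder on the nucleus server):
    #   ./python.sh load_stage.py --usd_path /Environments/Simple_Room/simple_room.usd --headless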
| 2,421 | Python | 34.101449 | 100 | 0.678232 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/livestream.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import omni
# This sample enables a livestream server to connect to when running headless
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"window_width": 1920,
"window_height": 1080,
"headless": True,
"renderer": "RayTracedLighting",
"display_options": 3807, # Set display options to show default grid
}
if __name__ == "__main__":
# Start the omniverse application
kit = OmniKitHelper(config=CONFIG)
# Enable Livestream extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
kit.set_setting("/app/window/drawMouse", True)
kit.set_setting("/app/livestream/proto", "ws")
ext_manager.set_extension_enabled_immediate("omni.kit.livestream.core", True)
ext_manager.set_extension_enabled_immediate("omni.kit.livestream.native", True)
# Run until closed
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
kit.update()
kit.stop()
kit.shutdown()
| 1,538 | Python | 34.790697 | 83 | 0.714564 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/change_resolution.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import random
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Simple example showing how to change resolution
kit = OmniKitHelper(config=CONFIG)
kit.update(1.0 / 60.0)
for i in range(100):
width = random.randint(128, 1980)
height = random.randint(128, 1980)
kit.set_setting("/app/renderer/resolution/width", width)
kit.set_setting("/app/renderer/resolution/height", height)
kit.update(1.0 / 60.0)
print(f"resolution set to: {width}, {height}")
# cleanup
kit.shutdown()
| 1,160 | Python | 34.181817 | 76 | 0.701724 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/urdf_import.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
    # URDF import, configuration and simulation sample
kit = OmniKitHelper(config=CONFIG)
import omni.kit.commands
from pxr import Sdf, Gf, UsdPhysics, UsdLux, PhysxSchema
# Setting up import configuration:
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = False
import_config.convex_decomp = False
import_config.import_inertia_tensor = True
import_config.fix_base = False
# Get path to extension data:
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_id = ext_manager.get_enabled_extension_id("omni.isaac.urdf")
extension_path = ext_manager.get_extension_path(ext_id)
# Import URDF
omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=extension_path + "/data/urdf/robots/carter/urdf/carter.urdf",
import_config=import_config,
)
# Get stage handle
stage = omni.usd.get_context().get_stage()
# Enable physics
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
# Set gravity
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set solver settings
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Add ground plane
omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, -50),
color=Gf.Vec3f(0.5),
)
# Add lighting
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
# Get handle to the Drive API for both wheels
left_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/left_wheel"), "angular")
right_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/right_wheel"), "angular")
# Set the velocity drive target in degrees/second
left_wheel_drive.GetTargetVelocityAttr().Set(150)
right_wheel_drive.GetTargetVelocityAttr().Set(150)
# Set the drive damping, which controls the strength of the velocity drive
left_wheel_drive.GetDampingAttr().Set(15000)
right_wheel_drive.GetDampingAttr().Set(15000)
# Set the drive stiffness, which controls the strength of the position drive
# In this case because we want to do velocity control this should be set to zero
left_wheel_drive.GetStiffnessAttr().Set(0)
right_wheel_drive.GetStiffnessAttr().Set(0)
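    # For position control instead (illustrative sketch, not used in this sample):
    # a non-zero stiffness and a position target in degrees would be set, e.g.
    #   left_wheel_drive.GetStiffnessAttr().Set(1000)
    #   left_wheel_drive.GetTargetPositionAttr().Set(90)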
# Start simulation
kit.play()
# perform simulation
for frame in range(100):
kit.update(1.0 / 60.0)
# Shutdown and exit
kit.stop()
kit.shutdown()
| 3,757 | Python | 36.58 | 115 | 0.713335 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/time_stepping.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Example usage, with step size test
kit = OmniKitHelper(config=CONFIG)
import omni.physx
from pxr import UsdPhysics, Sdf
UsdPhysics.Scene.Define(kit.get_stage(), Sdf.Path("/World/physicsScene"))
# Create callbacks to both editor and physics step callbacks
def editor_update(e: carb.events.IEvent):
dt = e.payload["dt"]
print("kit update step:", dt, "seconds")
def physics_update(dt: float):
print("physics update step:", dt, "seconds")
# start simulation
kit.play()
# assign callbacks
update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(editor_update)
physics_sub = omni.physx.acquire_physx_interface().subscribe_physics_step_events(physics_update)
# perform step experiments
print(f"Rendering and Physics with {1} second step size:")
kit.update(1.0)
print(f"Rendering and Physics with {1/60} seconds step:")
kit.update(1.0 / 60.0)
print(f"Rendering {1/30} seconds step size and Physics {1/120} seconds step size:")
kit.update(1.0 / 30.0, 1.0 / 120.0, 4)
# cleanup
update_sub = None
physics_sub = None
kit.stop()
kit.shutdown()
| 1,873 | Python | 33.072727 | 107 | 0.695675 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/gtc2020_track_utils.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
from PIL import Image
# TODO : This is custom, specific to the GTC2020 Jetracer course.
# Make a more general solution.
def line_seg_closest_point(v0, v1, p0):
# Project p0 onto (v0, v1) line, then clamp to line segment
d = v1 - v0
q = p0 - v0
t = np.dot(q, d) / np.dot(d, d)
t = np.clip(t, 0, 1)
return v0 + t * d
def line_seg_distance(v0, v1, p0):
p = line_seg_closest_point(v0, v1, p0)
return np.linalg.norm(p0 - p)
# Canonical arc is centered at origin, and goes from 0 to a0 radians
def canonical_arc_distance(R, a0, x):
a = np.arctan2(x[1], x[0])
if a < 0:
a = a + 2 * np.pi
if a > a0:
if a < a0 / 2 + np.pi:
a = a0
else:
a = 0
p = R * np.array([np.cos(a), np.sin(a)])
return np.linalg.norm(x - p)
def arc_distance(c, r, a0, a1, x):
# Point relative to arc origin
x0 = x - c
# Rotate point to canonical angle (where arc starts at 0)
c = np.cos(-a0)
s = np.sin(-a0)
R = np.array([[c, -s], [s, c]])
x0 = np.dot(R, x0)
return canonical_arc_distance(r, a1 - a0, x0)
def closest_point_arc(c, r, a0, a1, x):
# Direction to point
x0 = x - c
x0 = x0 / np.linalg.norm(x0)
# print(c, x0, r, c + x0 * r)
return c + x0 * r
# The forward direction at the closest point on an arc
def closest_point_arc_direction(c, r, a0, a1, x):
# Direction to point
x0 = x - c
x0 = x0 / np.linalg.norm(x0)
# The tangent is unit circle point rotated pi/2 radians
return np.array([-x0[1], x0[0]])
def arc_endpoints(c, r, a0, a1):
c0 = np.cos(a0)
s0 = np.sin(a0)
c1 = np.cos(a1)
s1 = np.sin(a1)
return c + r * np.array([[c0, s0], [c1, s1]])
# Measurements (in meters)
m0 = 7.620
m1 = 10.668
m2 = 5.491
m3 = 3.048
m4 = 4.348
m5 = 5.380
# Track width
w = 1.22
w_2 = w / 2
# Arc arrays
arc_center = np.zeros((4, 2))
arc_radius = np.zeros(4)
arc_angles = np.zeros((4, 2))
# Arcs
# Bottom left
arc_center[0] = np.array([w, w])
arc_radius[0] = w_2
arc_angles[0] = [np.pi, np.pi * 1.5]
# Top left
arc_center[1] = np.array([m3, m0])
arc_radius[1] = m3 - w_2
arc_angles[1] = [1.75 * np.pi, 3 * np.pi]
ep1 = arc_endpoints(arc_center[1], arc_radius[1], arc_angles[1][0], arc_angles[1][1])
# Others
arc_center[2] = np.array([m5, m4])
arc_radius[2] = 0.5 * (2.134 + 0.914)
arc_angles[2] = [0.75 * np.pi, 1.25 * np.pi]
ep2 = arc_endpoints(arc_center[2], arc_radius[2], arc_angles[2][0], arc_angles[2][1])
arc_center[3] = np.array([m2, w])
arc_radius[3] = w_2
arc_angles[3] = [np.pi * 1.5, np.pi * 2.25]
ep3 = arc_endpoints(arc_center[3], arc_radius[3], arc_angles[3][0], arc_angles[3][1])
# line segment points
line_verts = [
np.array([w_2, w]),
np.array([w_2, m0]),
ep1[0],
ep2[0],
ep2[1],
ep3[1],
np.array([m2, w_2]),
np.array([w, w_2]),
]
def random_track_point():
# TODO : Refactor these dimensions, which show up in multiple places
p = np.random.random(2) * [6.711, 10.668]
result = track_segment_closest_point(p)
return result * 100 # convert to cm. TODO standardize all entry points to cm
# Minimum distances to all segments of the track
def track_segment_distance(p):
d = np.zeros(8)
d[0] = line_seg_distance(line_verts[0], line_verts[1], p)
d[1] = line_seg_distance(line_verts[2], line_verts[3], p)
d[2] = line_seg_distance(line_verts[4], line_verts[5], p)
d[3] = line_seg_distance(line_verts[6], line_verts[7], p)
d[4] = arc_distance(arc_center[0], arc_radius[0], arc_angles[0][0], arc_angles[0][1], p)
d[5] = arc_distance(arc_center[1], arc_radius[1], arc_angles[1][0], arc_angles[1][1], p)
d[6] = arc_distance(arc_center[2], arc_radius[2], arc_angles[2][0], arc_angles[2][1], p)
d[7] = arc_distance(arc_center[3], arc_radius[3], arc_angles[3][0], arc_angles[3][1], p)
return d
def track_segment_closest_point(p):
d = track_segment_distance(p)
# If a line segment is the closest
if np.min(d[:4]) < np.min(d[4:]):
idx = np.argmin(d[:4], axis=0)
return line_seg_closest_point(line_verts[idx * 2], line_verts[idx * 2 + 1], p)
# If an arc is the closest
else:
idx = np.argmin(d[4:], axis=0)
return closest_point_arc(arc_center[idx], arc_radius[idx], arc_angles[idx][0], arc_angles[idx][1], p)
# Distance to closest point on the track
def center_line_dist(p):
p = 0.01 * p # Convert from m to cm
return np.min(track_segment_distance(p))
# Forward vector at the closest point on the center line
def closest_point_track_direction(p):
p = 0.01 * p # Convert from m to cm
d = track_segment_distance(p)
# If a line segment is the closest
if np.min(d[:4]) < np.min(d[4:]):
idx = np.argmin(d[:4], axis=0)
v = line_verts[idx * 2 + 1] - line_verts[idx * 2]
return v / np.linalg.norm(v)
# If an arc is the closest
else:
idx = np.argmin(d[4:], axis=0)
v = closest_point_arc_direction(arc_center[idx], arc_radius[idx], arc_angles[idx][0], arc_angles[idx][1], p)
# TODO : All arcs are defined counter-clockwise,
# but this doesn't always represent the forward direction on the track.
# This is a hack to correct the tangent vector on all but one of the arcs.
if idx != 2:
v = -v
return v
LANE_WIDTH = 0.7  # full track width is w = 1.22 m; a car more than ~w/2 from the center line is off the track, 0.7 m adds a small margin
TRACK_DIMS = [671, 1066] # the track is within (0, 0) to (671.1 cm, 1066.8 cm)
def is_racing_forward(prev_pose, curr_pose):
prev_pose = 0.01 * prev_pose
curr_pose = 0.01 * curr_pose
bottom_left_corner = np.array([0, 0])
top_left_corner = np.array([0, 10.668])
top_right_corner = np.array([6.711, 10.668])
bottom_right_corner = np.array([6.711, 0])
d0 = line_seg_distance(bottom_left_corner, top_left_corner, curr_pose)
d1 = line_seg_distance(top_left_corner, top_right_corner, curr_pose)
d2 = line_seg_distance(top_right_corner, bottom_right_corner, curr_pose)
d3 = line_seg_distance(bottom_right_corner, bottom_left_corner, curr_pose)
min_d = np.min([d0, d1, d2, d3])
which_side = np.array([0, 0])
if min_d == d0:
which_side = top_left_corner - bottom_left_corner
elif min_d == d1:
which_side = top_right_corner - top_left_corner
elif min_d == d2:
which_side = bottom_right_corner - top_right_corner
elif min_d == d3:
which_side = bottom_left_corner - bottom_right_corner
which_size_unit = which_side / np.linalg.norm(which_side)
curr_vel = curr_pose - prev_pose
curr_vel_norm = np.linalg.norm(curr_vel)
curr_vel_unit = np.array([0, 0])
    # guard against division by zero when the car has not moved
if curr_vel_norm:
curr_vel_unit = curr_vel / curr_vel_norm
return np.dot(curr_vel_unit, which_size_unit)
def is_outside_track_boundary(curr_pose):
dist = center_line_dist(curr_pose)
return dist < LANE_WIDTH
if __name__ == "__main__":
print("Generating test PNGs")
# scale
s = 0.02
H = int(10668 * s)
W = int(6711 * s)
d = np.zeros((H, W))
fwd = np.zeros((H, W, 3))
h = np.zeros((H, W))
print(H, W)
for _ in range(10000):
p_scaled = np.random.random(2) * [W, H]
p_meters = p_scaled / s / 1000.0
# p_proj = line_seg_closest_point(line_verts[6], line_verts[7], p_meters)
p_proj = track_segment_closest_point(p_meters)
# print(h.shape, p_scaled, p_meters, p_proj, p_proj * s)
p_proj = p_proj + np.random.normal([0, 0], 0.1)
idx = p_proj * s * 1000.0
idx = np.floor(idx)
idx = np.clip(idx, [0, 0], [W - 1, H - 1]) # HACK
idx = idx.astype("int")
h[idx[1], idx[0]] = h[idx[1], idx[0]] + 1
for i in range(H):
y = ((i + 0.5) / s) / 10.0
if i % 10 == 0:
print("{:0.1f}%".format(i / H * 100))
for j in range(W):
x = ((j + 0.5) / s) / 10.0
p = np.array([x, y])
d[i, j] = center_line_dist(p)
f = closest_point_track_direction(p)
fwd[i, j] = np.array([0.5 * (f[0] + 1), 0.5 * (f[1] + 1), 0])
print("100.0%")
# Images have zero at the top, so we flip vertically
d = np.flipud(d)
fwd = np.flip(fwd, axis=0)
h = np.flipud(h)
# Distance function
im = Image.fromarray((d * 255 / np.max(d)).astype("uint8"))
im.save("dist.png")
# Track forward vector
im = Image.fromarray((fwd * 255).astype("uint8"), "RGB")
im.save("fwd.png")
# Track forward vector X
im = Image.fromarray((fwd[:, :, 0] * 255).astype("uint8"))
im.save("fwd_x.png")
# Track forward vector Y
im = Image.fromarray((fwd[:, :, 1] * 255).astype("uint8"))
im.save("fwd_y.png")
# H
h = h / np.max(h)
im = Image.fromarray((h * 255).astype("uint8"))
im.save("h.png")
| 9,404 | Python | 25.643059 | 116 | 0.581986 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/jetracer_env.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
from torchvision.transforms import ColorJitter
import PIL
import numpy as np
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf, Usd, Semantics
import os
import json
import time
import atexit
import asyncio
import numpy as np
import random
import matplotlib.pyplot as plt
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.python_app import OmniKitHelper
from omni.isaac.synthetic_utils import SyntheticDataHelper
from jetracer import Jetracer
from track_environment import Environment
from gtc2020_track_utils import *
import gym
from gym import spaces
class JetracerEnv:
metadata = {"render.modes": ["human"]}
# TODO : Extract more training options
def __init__(
self,
omni_kit,
z_height=0,
max_resets=10,
updates_per_step=3,
steps_per_rollout=500,
mirror_mode=False,
backwards_term_mode=0,
reward_mode=0,
):
self.MIRROR_MODE = mirror_mode
self.BACKWARDS_TERMINATION_MODE = backwards_term_mode
self.REWARD_MODE = reward_mode
print("MIRROR_MODE = {}".format(self.MIRROR_MODE))
print("BACKWARDS_TERMINATION_MODE = {}".format(self.BACKWARDS_TERMINATION_MODE))
print("REWARD_MODE = {}".format(self.REWARD_MODE))
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=255, shape=(224, 224, 6), dtype=np.uint8)
self.color_jitter = ColorJitter(0.1, 0.05, 0.05, 0.05)
self.noise = 0.05
self.dt = 1 / 30.0
self.omniverse_kit = omni_kit
self.sd_helper = SyntheticDataHelper()
self.roads = Environment(self.omniverse_kit)
# make environment z up
self.omniverse_kit.set_up_axis(UsdGeom.Tokens.z)
# generate roads
self.shape = [6, 6]
self.roads.generate_road(self.shape)
self.roads.generate_lights()
# randomize once to initialize stage
# the following two lines must be called prior to Jetracer initialization
# any DR related setup calls should occur before this point
self.omniverse_kit.update(1 / 60.0)
self.roads.dr.randomize_once()
# spawn robot
self.jetracer = Jetracer(self.omniverse_kit)
self.initial_loc = self.roads.get_valid_location()
self.jetracer.spawn(Gf.Vec3d(self.initial_loc[0], self.initial_loc[1], 5), 0)
self.prev_pose = [0, 0, 0]
self.current_pose = [0, 0, 0]
# switch kit camera to jetracer camera
self.jetracer.activate_camera()
# start simulation
self.omniverse_kit.play()
# Step simulation so that objects fall to rest
# wait until all materials are loaded
frame = 0
print("simulating physics...")
while frame < 60 or self.omniverse_kit.is_loading():
self.omniverse_kit.update(self.dt)
frame = frame + 1
print("done after frame: ", frame)
self.initialized = False
self.numsteps = 0
self.numresets = 0
self.maxresets = 10
        # set this to 1 after around 200k steps to randomize less
# self.maxresets = 1
# Randomly mirror horizontally
self.update_mirror_mode()
def update_mirror_mode(self):
# Mirror if mode is enabled and we randomly sample True
self.mirror_mode = self.MIRROR_MODE & random.choice([False, True])
def calculate_reward(self):
# Current and last positions
pose = np.array([self.current_pose[0], self.current_pose[1]])
prev_pose = np.array([self.prev_pose[0], self.prev_pose[1]])
# Finite difference velocity calculation
vel = pose - prev_pose
vel_norm = vel
vel_magnitude = np.linalg.norm(vel)
if vel_magnitude > 0.0:
vel_norm = vel / vel_magnitude
# Distance from the center of the track
dist = center_line_dist(pose)
self.dist = dist
# racing_forward = is_racing_forward(prev_pose, pose)
# reward = racing_forward * self.current_speed * np.exp(-dist ** 2 / 0.05 ** 2)
fwd_dir = closest_point_track_direction(pose)
fwd_dot = np.dot(fwd_dir, vel_norm)
if self.REWARD_MODE == 0:
reward = fwd_dot * self.current_speed * np.exp(-dist ** 2 / 0.05 ** 2)
elif self.REWARD_MODE == 1:
reward = fwd_dot * self.current_speed
return reward
def is_dead(self):
return not is_outside_track_boundary(np.array([self.current_pose[0], self.current_pose[1]]))
def transform_action(self, action):
# If mirrored, swap steering direction
if self.mirror_mode:
action[1] = -action[1]
return action
def transform_state_image(self, im):
# If enabled, mirror image horizontally
if self.mirror_mode:
return np.flip(im, axis=1)
return im
def reset(self):
# Randomly mirror horizontally
self.update_mirror_mode()
if self.numresets % self.maxresets == 0:
self.roads.reset(self.shape)
if not self.initialized:
            state, reward, done, info = self.step([0, 0])
self.initialized = True
# Random track point in cm, with a 10 cm stddev gaussian offset
loc = random_track_point()
loc = loc + np.random.normal([0.0, 0.0], 10.0)
# Forward direction at that point
fwd = closest_point_track_direction(loc)
# Forward angle in degrees, with a 10 degree stddev gaussian offset
rot = np.arctan2(fwd[1], fwd[0])
rot = rot * 180.0 / np.pi
        rot = rot + np.random.normal(0.0, 10.0)
self.jetracer.teleport(Gf.Vec3d(loc[0], loc[1], 5), rot, settle=True)
obs = self.jetracer.observations()
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
if self.numresets % self.maxresets == 0:
frame = 0
while self.omniverse_kit.is_loading(): # or frame < 750:
self.omniverse_kit.update(self.dt)
frame += 1
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
currentState = gt["rgb"][:, :, :3]
currentState = self.transform_state_image(currentState)
img = np.concatenate((currentState, currentState), axis=2)
        img = np.clip((255 * self.noise * np.random.randn(224, 224, 6) + img.astype(float)), 0, 255).astype(np.uint8)
self.numsteps = 0
self.previousState = currentState
self.numresets += 1
return img
def is_driving_backwards(self):
# TODO : Refactor, the bulk of this code is shared with the reward function.
# Also, find out at what point in an iteration this is called,
# compared to the reward, physics and stuff.
# If off by a timestep it's close enough, probably won't cause any issues.
# Current and last positions
pose = np.array([self.current_pose[0], self.current_pose[1]])
prev_pose = np.array([self.prev_pose[0], self.prev_pose[1]])
# Finite difference velocity calculation
vel = pose - prev_pose
vel_norm = vel
vel_magnitude = np.linalg.norm(vel)
if vel_magnitude > 0.0:
vel_norm = vel / vel_magnitude
# Forward direction on the track
fwd_dir = closest_point_track_direction(pose)
# Normalized velocity projected onto the forward direction
fwd_dot = np.dot(fwd_dir, vel_norm)
        # Driving backwards: angle between velocity and the track's forward direction exceeds 7*pi/8 radians
return fwd_dot < np.cos(7.0 * np.pi / 8.0)
def step(self, action):
print("Number of steps ", self.numsteps)
# print("Action ", action)
transformed_action = self.transform_action(action)
self.jetracer.command(transformed_action)
frame = 0
total_reward = 0
reward = 0
while frame < 3:
self.omniverse_kit.update(self.dt)
obs = self.jetracer.observations()
self.prev_pose = self.current_pose
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
reward = self.calculate_reward()
done = self.is_dead()
total_reward += reward
frame = frame + 1
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
currentState = gt["rgb"][:, :, :3]
currentState = self.transform_state_image(currentState)
if not self.initialized:
self.previousState = currentState
img = np.concatenate((currentState, self.previousState), axis=2)
        img = np.clip((255 * self.noise * np.random.randn(224, 224, 6) + img.astype(float)), 0, 255).astype(np.uint8)
self.previousState = currentState
other = np.array(
[*obs["pose"], *obs["linear_velocity"], *obs["local_linear_velocity"], *obs["angular_velocity"]]
)
other = np.expand_dims(other.astype(float), 0)
self.numsteps += 1
if done:
print("robot is dead")
if self.numsteps > 500:
done = True
print("robot stepped 500 times")
if self.dist > LANE_WIDTH:
print("robot out of bounds. dist = ", self.dist)
done = True
if self.BACKWARDS_TERMINATION_MODE == 0:
if self.current_forward_velocity <= -35:
print("robot was going backwards forward velocity = ", self.current_forward_velocity)
done = True
elif self.BACKWARDS_TERMINATION_MODE == 1:
if self.is_driving_backwards():
print("Robot was driving backwards")
done = True
return img, reward, done, {}
| 10,660 | Python | 32.420063 | 120 | 0.61257 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/jetracer_train.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import os
import sys
import json
import signal
import argparse
from argparse import Namespace
from omni.isaac.python_app import OmniKitHelper
from jetracer_model import CustomCNN
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback, CheckpointCallback
def train(args):
CUSTOM_CONFIG = {
"width": 224,
"height": 224,
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
omniverse_kit = OmniKitHelper(CUSTOM_CONFIG)
from jetracer_env import JetracerEnv
# we disable all anti aliasing in the render because we want to train on the raw camera image.
omniverse_kit.set_setting("/rtx/post/aa/op", 0)
env = JetracerEnv(
omniverse_kit,
mirror_mode=args.mirror_mode,
backwards_term_mode=args.backwards_termination_mode,
reward_mode=args.reward_mode,
max_resets=args.rand_freq,
)
checkpoint_callback = CheckpointCallback(save_freq=args.save_freq, save_path="./params/", name_prefix="rl_model")
net_arch = [512, 256, dict(pi=[128, 64, 32], vf=[128, 64, 32])]
policy_kwargs = {"net_arch": net_arch, "features_extractor_class": CustomCNN, "activation_fn": torch.nn.ReLU}
# create a new model
if args.loaded_checkpoint == "":
model = PPO(
"CnnPolicy",
env,
verbose=1,
tensorboard_log=args.tensorboard_dir,
policy_kwargs=policy_kwargs,
device="cuda",
n_steps=args.step_freq,
)
else:
model = PPO.load(args.loaded_checkpoint, env)
model.learn(
total_timesteps=args.total_steps,
callback=checkpoint_callback,
eval_env=env,
eval_freq=args.eval_freq,
eval_log_path=args.evaluation_dir,
reset_num_timesteps=args.reset_num_timesteps,
)
model.save(args.checkpoint_name)
def runEval(args):
CUSTOM_CONFIG = {
"width": 224,
"height": 224,
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
# load a zip file to evaluate here. PPO also saves the best model so far in the eval_log folder.
# You can evaluate those zip files in the params folder as well (i.e params/rl_model_125999_steps.zip)
agent = PPO.load(args.evaluation_dir + "/best_model.zip", device="cuda")
omniverse_kit = OmniKitHelper(CUSTOM_CONFIG)
from jetracer_env import JetracerEnv
# we disable all anti aliasing in the render because we want to train on the raw camera image.
omniverse_kit.set_setting("/rtx/post/aa/op", 0)
env = JetracerEnv(
omniverse_kit,
mirror_mode=args.mirror_mode,
backwards_term_mode=args.backwards_termination_mode,
reward_mode=args.reward_mode,
max_resets=args.rand_freq,
)
obs = env.reset()
while True:
action = agent.predict(obs)
print(action)
obs, rew, done, infos = env.step(action[0])
if done:
obs = env.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--loaded_checkpoint", help="path to checkpoint to be loaded", default="", nargs="?", type=str)
parser.add_argument("-E", "--eval", help="evaluate checkpoint", action="store_true")
parser.add_argument(
"-R", "--reset_num_timesteps", help="reset the current timestep number (used in logging)", action="store_true"
)
parser.add_argument(
"-M", "--mirror_mode", help="reflect images and actions horizontally during training", action="store_true"
)
parser.add_argument("-H", "--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument(
"--checkpoint_name", help="name of checkpoint file (no suffix)", default="checkpoint_25k", type=str
)
parser.add_argument("--tensorboard_dir", help="path to tensorboard log directory", default="tensorboard", type=str)
parser.add_argument("--evaluation_dir", help="path to evaluation log directory", default="eval_log", type=str)
parser.add_argument("--save_freq", help="number of steps before saving a checkpoint", default=1000, type=int)
parser.add_argument("--eval_freq", help="number of steps before running an evaluation", default=1000, type=int)
parser.add_argument("--step_freq", help="number of steps before executing a PPO update", default=1000, type=int)
parser.add_argument(
"--rand_freq", help="number of environment resets before domain randomization", default=10, type=int
)
parser.add_argument(
"--total_steps",
help="the total number of steps before exiting and saving a final checkpoint",
default=25000,
type=int,
)
parser.add_argument("--backwards_termination_mode", help="???", default=0, type=int)
parser.add_argument("--reward_mode", help="???", default=0, type=int)
parser.add_argument(
"--experimentFile", help="specify configuration via JSON. Overrides commandline", default="", type=str
)
args = parser.parse_args()
if args.experimentFile != "":
args_dict = vars(args)
if os.path.exists(args.experimentFile):
with open(args.experimentFile) as f:
json_args_dict = json.load(f)
args_dict.update(json_args_dict)
args = Namespace(**args_dict)
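    # Example experiment file (hypothetical values); any keys present override the matching
    # command-line defaults:
    #     {
    #         "headless": true,
    #         "mirror_mode": true,
    #         "total_steps": 100000,
    #         "save_freq": 5000
    #     }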
print("running with args: ", args)
def handle_exit(*args, **kwargs):
print("Exiting training...")
quit()
signal.signal(signal.SIGINT, handle_exit)
if args.eval:
runEval(args)
else:
train(args)
| 6,256 | Python | 31.931579 | 119 | 0.651854 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/jetracer.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
from pxr import UsdGeom, Gf
import numpy as np
class Jetracer:
def __init__(self, omni_kit):
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
# Enable this after stage is loaded to prevent errors
ext_manager = self.omni_kit.app.get_extension_manager()
ext_manager.set_extension_enabled("omni.physx.vehicle", True)
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.usd_path = nucleus_server + "/Isaac/Robots/Jetracer/jetracer.usd"
self.robot_prim = None
self._dynamic_control = _dynamic_control
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.ar = None
# rotation is in degrees
def spawn(self, location, rotation):
stage = self.omni_kit.get_stage()
prefix = "/World/Robot/Jetracer"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
self.robot_prim = stage.DefinePrim(prim_path, "Xform")
self.robot_prim.GetReferences().AddReference(self.usd_path)
xform = UsdGeom.Xformable(self.robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(location)
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
xform_op.Set(mat)
self.camera_path = prim_path + "/Jetracer/Vehicle/jetracer_camera"
# self.camera_path = prim_path + "Vehicle/jetracer_camera"
def teleport(self, location, rotation, settle=False):
if self.ar is None:
self.ar = self.dc.get_rigid_body(self.robot_prim.GetPath().pathString + "/Vehicle")
self.chassis = self.ar
self.dc.wake_up_rigid_body(self.ar)
rot_quat = Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation).GetQuaternion()
tf = self._dynamic_control.Transform(
location,
(rot_quat.GetImaginary()[0], rot_quat.GetImaginary()[1], rot_quat.GetImaginary()[2], rot_quat.GetReal()),
)
self.dc.set_rigid_body_pose(self.chassis, tf)
self.dc.set_rigid_body_linear_velocity(self.chassis, [0, 0, 0])
self.dc.set_rigid_body_angular_velocity(self.chassis, [0, 0, 0])
self.command((0, 0))
if settle:
frame = 0
velocity = 1
print("Settling robot...")
while velocity > 0.1 and frame < 120:
self.omni_kit.update(1.0 / 60.0)
lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
velocity = np.linalg.norm([lin_vel.x, lin_vel.y, lin_vel.z])
# print("velocity magnitude is: ", velocity)
frame = frame + 1
# print("done after frame: HERE", frame)
def activate_camera(self):
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera_path))
def command(self, motor_value):
if self.ar is None:
vehicle_path = self.robot_prim.GetPath().pathString + "/Jetracer/Vehicle"
print(vehicle_path)
self.ar = self.dc.get_rigid_body(vehicle_path)
self.chassis = self.ar
print(self.chassis)
stage = self.omni_kit.get_stage()
# for child_prim in stage.Traverse():
# print(child_prim.GetPath().pathString)
self.accelerator = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:accelerator")
self.left_steer = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:steerLeft")
self.right_steer = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:steerRight")
self.target_gear = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:targetGear")
# TODO add brake physxVehicleController:brake
self.dc.wake_up_rigid_body(self.ar)
accel_cmd = self.wheel_speed_from_motor_value(motor_value[0])
steer_left_cmd = self.wheel_speed_from_motor_value(motor_value[1])
acceleration = max(min(accel_cmd, 1), -1)
steering = max(min(steer_left_cmd, 1), -1)
gear = 1 # going forward
if acceleration < 0:
gear = -1 # reverse
self.accelerator.Set(abs(acceleration))
self.target_gear.Set(gear)
if steering > 0:
self.right_steer.Set(steering)
else:
self.left_steer.Set(abs(steering))
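        # Example: command((0.5, -0.2)) yields ~0.75 forward throttle in gear 1 and a ~0.27
        # left-steer command (negative steering values steer left, positive steer right).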
    # Idealized motor model that converts a PWM value to a velocity
def wheel_speed_from_motor_value(self, input):
threshold = 0.05
if input >= 0:
if input > threshold:
return 1.604 * input - 0.05
else:
return 0
elif input < 0:
if input < -threshold:
return 1.725 * input + 0.0757
else:
return 0
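    # Sample values for the motor model above: input 0.5 -> 1.604*0.5 - 0.05 = 0.752,
    # input -0.5 -> 1.725*(-0.5) + 0.0757 = -0.787, and |input| <= 0.05 falls in the dead band
    # and maps to 0.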
def observations(self):
if self.ar is None:
self.ar = self.dc.get_rigid_body(self.robot_prim.GetPath().pathString + "/Vehicle")
self.chassis = self.ar
dc_pose = self.dc.get_rigid_body_pose(self.chassis)
dc_lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
dc_local_lin_vel = self.dc.get_rigid_body_local_linear_velocity(self.chassis)
dc_ang_vel = self.dc.get_rigid_body_angular_velocity(self.chassis)
return {
"pose": (dc_pose.p.x, dc_pose.p.y, dc_pose.p.z, dc_pose.r.w, dc_pose.r.x, dc_pose.r.y, dc_pose.r.z),
"linear_velocity": (dc_lin_vel.x, dc_lin_vel.y, dc_lin_vel.z),
"local_linear_velocity": (dc_local_lin_vel.x, dc_local_lin_vel.y, dc_local_lin_vel.z),
"angular_velocity": (dc_ang_vel.x, dc_ang_vel.y, dc_ang_vel.z),
}
| 6,524 | Python | 42.21192 | 117 | 0.622777 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/track_environment.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
import random
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from omni.isaac.synthetic_utils import DomainRandomization
from gtc2020_track_utils import *
class Environment:
def __init__(self, omni_kit, z_height=0):
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
self.find_nucleus_server = find_nucleus_server
result, nucleus_server = self.find_nucleus_server()
if result is False:
carb.log_error(
"Could not find nucleus server with /Isaac folder. Please specify the correct nucleus server in apps/omni.isaac.sim.python.kit"
)
return
self.texture_list = [
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/marble_tile.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_a.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_b.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/textured_wall.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered_color.png",
]
self.prims = [] # list of spawned tiles
self.height = z_height # height of the ground tiles
self.state = None
# because the ground plane is what the robot drives on, we only do this once. We can then re-generate the road as often as we need without impacting physics
self.setup_physics()
contents = omni.client.list(nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/")[1]
for entry in contents:
self.texture_list.append(
nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/" + entry.relative_path
)
contents = omni.client.list(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/")[1]
names = []
loaded_paths = []
for entry in contents:
if not entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
names.append(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/" + entry.relative_path)
loaded_paths.append("/World/DR/mesh_component/mesh_" + entry.relative_path[0:-4])
print(loaded_paths)
self.omni_kit.create_prim("/World/Floor", "Xform")
stage = omni.usd.get_context().get_stage()
cubeGeom = UsdGeom.Cube.Define(stage, "/World/Floor/thefloor")
cubeGeom.CreateSizeAttr(300)
offset = Gf.Vec3f(75, 75, -150.1)
cubeGeom.AddTranslateOp().Set(offset)
prims = []
self.dr = DomainRandomization()
self.dr.toggle_manual_mode()
self.dr.create_mesh_comp(prim_paths=prims, mesh_list=names, mesh_range=[1, 1])
self.omni_kit.update(1 / 60.0)
print("waiting for materials to load...")
while self.omni_kit.is_loading():
self.omni_kit.update(1 / 60.0)
lights = []
for i in range(5):
prim_path = "/World/Lights/light_" + str(i)
self.omni_kit.create_prim(
prim_path,
"SphereLight",
translation=(0, 0, 200),
rotation=(0, 0, 0),
attributes={"radius": 10, "intensity": 1000.0, "color": (1.0, 1.0, 1.0)},
)
lights.append(prim_path)
self.dr.create_movement_comp(
prim_paths=loaded_paths, min_range=(0, 0, 15), max_range=(TRACK_DIMS[0], TRACK_DIMS[1], 15)
)
self.dr.create_rotation_comp(prim_paths=loaded_paths)
self.dr.create_visibility_comp(prim_paths=loaded_paths, num_visible_range=(15, 15))
self.dr.create_light_comp(light_paths=lights)
self.dr.create_movement_comp(
prim_paths=lights, min_range=(0, 0, 30), max_range=(TRACK_DIMS[0], TRACK_DIMS[1], 30)
)
self.dr.create_texture_comp(
prim_paths=["/World/Floor"], enable_project_uvw=True, texture_list=self.texture_list
)
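        # Together these DR components re-draw the scene on every dr.randomize_once() call
        # (see reset()): the YCB meshes and lights get new positions, the meshes get new
        # rotations with 15 of them visible at a time, and the floor receives a new projected
        # texture from texture_list.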
def generate_lights(self):
# TODO: center this onto the track
prim_path = omni.usd.get_stage_next_free_path(self.omni_kit.get_stage(), "/World/Env/Light", False)
# self.prims.append(prim_path)
# LOCMOD revisit (don't add so it won't be removed on reset)
self.omni_kit.create_prim(
prim_path,
"RectLight",
translation=(75, 75, 100),
rotation=(0, 0, 0),
attributes={"height": 150, "width": 150, "intensity": 2000.0, "color": (1.0, 1.0, 1.0)},
)
def reset(self, shape):
# this deletes objects in self.prims
stage = omni.usd.get_context().get_stage()
for layer in stage.GetLayerStack():
edit = Sdf.BatchNamespaceEdit()
for path in self.prims:
prim_spec = layer.GetPrimAtPath(path)
if prim_spec is None:
continue
parent_spec = prim_spec.realNameParent
if parent_spec is not None:
edit.Add(path, Sdf.Path.emptyPath)
layer.Apply(edit)
self.prims = []
# self.pxrImageable.MakeInvisible()
# LOCMOD revisit
# self.generate_road(shape)
self.dr.randomize_once()
def generate_road(self, shape):
stage = self.omni_kit.get_stage()
self.add_track(stage)
def add_track(self, stage):
result, nucleus_server = self.find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
path = nucleus_server + "/Isaac/Environments/Jetracer/jetracer_track_solid.usd"
prefix = "/World/Env/Track"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
# self.prims.append(prim_path) #(don't add so the jetracer track won't be removed on reset)
track_prim = stage.DefinePrim(prim_path, "Xform")
track_prim.GetReferences().AddReference(path)
# xform = UsdGeom.Xformable(track_prim)
# xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
# mat = Gf.Matrix4d().SetTranslate(location)
# mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
# xform_op.Set(mat)
def setup_physics(self):
from pxr import PhysxSchema, PhysicsSchemaTools
stage = self.omni_kit.get_stage()
# Add physics scene
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/World/Env/PhysicsScene"))
# Set gravity vector
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/World/Env/PhysicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/World/Env/PhysicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Create physics plane for the ground
PhysicsSchemaTools.addGroundPlane(
stage, "/World/Env/GroundPlane", "Z", 100.0, Gf.Vec3f(0, 0, self.height), Gf.Vec3f(1.0)
)
# Hide the visual geometry
imageable = UsdGeom.Imageable(stage.GetPrimAtPath("/World/Env/GroundPlane/geom"))
if imageable:
imageable.MakeInvisible()
def get_valid_location(self):
        # keep sampling random points until one lies within LANE_WIDTH of the track center line
dist = 1
x = 4
y = 4
while dist > LANE_WIDTH:
x = random.randint(0, TRACK_DIMS[0])
y = random.randint(0, TRACK_DIMS[1])
dist = center_line_dist(np.array([x, y]))
print("get valid location called", x, y)
return (x, y)
| 8,502 | Python | 41.515 | 164 | 0.620325 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/jetracer_model.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Beta, Normal, Categorical
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomCNN(BaseFeaturesExtractor):
"""
:param observation_space: (gym.Space)
:param features_dim: (int) Number of features extracted.
This corresponds to the number of unit for the last layer.
"""
def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512):
super(CustomCNN, self).__init__(observation_space, features_dim)
# We assume CxHxW images (channels first)
# Re-ordering will be done by pre-preprocessing or wrapper
n_input_channels = observation_space.shape[0]
self.cnn = nn.Sequential(
nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Flatten(),
)
# Compute shape by doing one forward pass
with torch.no_grad():
n_flatten = self.cnn(torch.as_tensor(observation_space.sample()[None]).float()).shape[1]
print("POST CONV FEATURES = ", n_flatten)
# define the hidden layer to translate to a fixed number of features
self.linear = nn.Sequential(nn.Linear(n_flatten, features_dim), nn.ReLU())
def forward(self, observations: torch.Tensor) -> torch.Tensor:
return self.linear(self.cnn(observations))
| 2,211 | Python | 37.807017 | 100 | 0.677069 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/online_generation/segmentation/dataset.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Dataset with online randomized scene generation for Instance Segmentation training.
Use OmniKit to generate a simple scene. At each iteration, the scene is populated by
adding assets from the user-specified classes with randomized pose and colour.
The camera position is also randomized before capturing groundtruth consisting of
an RGB rendered image, Tight 2D Bounding Boxes and Instance Segmentation masks.
"""
import os
import glob
import torch
import random
import numpy as np
import signal
import omni
from omni.isaac.python_app import OmniKitHelper
# to work around torch's SSL issue
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Setup default generation variables
# Value are (min, max) ranges
RANDOM_TRANSLATION_X = (-30.0, 30.0)
RANDOM_TRANSLATION_Z = (-30.0, 30.0)
RANDOM_ROTATION_Y = (0.0, 360.0)
SCALE = 20
CAMERA_DISTANCE = 300
BBOX_AREA_THRESH = 16
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "PathTracing",
"samples_per_pixel_per_frame": 12,
"headless": False,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
class RandomObjects(torch.utils.data.IterableDataset):
"""Dataset of random ShapeNet objects.
Objects are randomly chosen from selected categories and are positioned, rotated and coloured
randomly in an empty room. RGB, BoundingBox2DTight and Instance Segmentation are captured by moving a
camera aimed at the centre of the scene which is positioned at random at a fixed distance from the centre.
    This dataset is intended for use with ShapeNet but will function with any dataset of USD models
    structured as `root/category/**/*.usd`. Note that it is designed for assets without materials
    attached; this avoids having to compile MDLs and load textures while training.
Args:
categories (tuple of str): Tuple or list of categories. For ShapeNet, these will be the synset IDs.
max_asset_size (int): Maximum asset file size that will be loaded. This prevents out of memory errors
due to loading large meshes.
num_assets_min (int): Minimum number of assets populated in the scene.
num_assets_max (int): Maximum number of assets populated in the scene.
split (float): Fraction of the USDs found to use for training.
train (bool): If true, use the first training split and generate infinite random scenes.
"""
def __init__(
self, root, categories, max_asset_size=None, num_assets_min=3, num_assets_max=5, split=0.7, train=True
):
assert len(categories) > 1
assert (split > 0) and (split <= 1.0)
self.kit = OmniKitHelper(config=RENDER_CONFIG)
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.synthetic_utils import shapenet
self.sd_helper = SyntheticDataHelper()
self.stage = self.kit.get_stage()
# If ShapeNet categories are specified with their names, convert to synset ID
# Remove this if using with a different dataset than ShapeNet
category_ids = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in categories]
self.categories = category_ids
self.range_num_assets = (num_assets_min, max(num_assets_min, num_assets_max))
self.references = self._find_usd_assets(root, category_ids, max_asset_size, split, train)
self._setup_world()
self.cur_idx = 0
self.exiting = False
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
    def _setup_world(self):
        """Setup lights, walls, floor, ceiling and camera"""
        from pxr import UsdGeom
        # In a practical setting, the room parameters should attempt to match those of the
        # target domain. Here, we instead choose simple defaults.
self.kit.create_prim(
"/World/Room", "Sphere", attributes={"radius": 1e3, "primvars:displayColor": [(1.0, 1.0, 1.0)]}
)
self.kit.create_prim(
"/World/Ground",
"Cylinder",
translation=(0.0, -0.5, 0.0),
rotation=(90.0, 0.0, 0.0),
attributes={"height": 1, "radius": 1e4, "primvars:displayColor": [(1.0, 1.0, 1.0)]},
)
self.kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(-450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (0.0, 0.365, 0.848)},
)
self.kit.create_prim(
"/World/Light2",
"SphereLight",
translation=(450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (1.0, 0.278, 0.0)},
)
self.kit.create_prim("/World/Asset", "Xform")
self.camera_rig = UsdGeom.Xformable(self.kit.create_prim("/World/CameraRig", "Xform"))
self.camera = self.kit.create_prim("/World/CameraRig/Camera", "Camera", translation=(0.0, 0.0, CAMERA_DISTANCE))
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera.GetPath()))
self.viewport = omni.kit.viewport.get_default_viewport_window()
self.kit.update()
def _find_usd_assets(self, root, categories, max_asset_size, split, train=True):
"""Look for USD files under root/category for each category specified.
For each category, generate a list of all USD files found and select
assets up to split * len(num_assets) if `train=True`, otherwise select the
remainder.
"""
references = {}
for category in categories:
all_assets = glob.glob(os.path.join(root, category, "**/*.usd"), recursive=True)
            # Filter out large files (to avoid out-of-memory errors during training)
if max_asset_size is None:
assets_filtered = all_assets
else:
assets_filtered = []
for a in all_assets:
if os.stat(a).st_size > max_asset_size * 1e6:
print(f"{a} skipped as it exceeded the max size {max_asset_size} MB.")
else:
assets_filtered.append(a)
num_assets = len(assets_filtered)
if num_assets == 0:
raise ValueError(f"No USDs found for category {category} under max size {max_asset_size} MB.")
if train:
references[category] = assets_filtered[: int(num_assets * split)]
else:
references[category] = assets_filtered[int(num_assets * split) :]
return references
    def _add_preview_surface(self, prim, diffuse, roughness, metallic):
        """Add a preview surface material using the metallic workflow."""
        from pxr import UsdShade, Sdf
path = f"{prim.GetPath()}/mat"
material = UsdShade.Material.Define(self.stage, path)
pbrShader = UsdShade.Shader.Define(self.stage, f"{path}/shader")
pbrShader.CreateIdAttr("UsdPreviewSurface")
pbrShader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Float3).Set(diffuse)
pbrShader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(roughness)
pbrShader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(metallic)
material.CreateSurfaceOutput().ConnectToSource(pbrShader, "surface")
UsdShade.MaterialBindingAPI(prim).Bind(material)
    def load_single_asset(self, ref, semantic_label, suffix=""):
        """Load a USD asset with random pose.
        args
            ref (str): Path to the USD that this prim will reference.
            semantic_label (str): Semantic label.
            suffix (str): String to add to the end of the prim's path.
        """
        from pxr import UsdGeom
x = random.uniform(*RANDOM_TRANSLATION_X)
z = random.uniform(*RANDOM_TRANSLATION_Z)
rot_y = random.uniform(*RANDOM_ROTATION_Y)
asset = self.kit.create_prim(
f"/World/Asset/mesh{suffix}",
"Xform",
scale=(SCALE, SCALE, SCALE),
rotation=(0.0, rot_y, 0.0),
ref=ref,
semantic_label=semantic_label,
)
bound = UsdGeom.Mesh(asset).ComputeWorldBound(0.0, "default")
box_min_y = bound.GetBox().GetMin()[1]
UsdGeom.XformCommonAPI(asset).SetTranslate((x, -box_min_y, z))
return asset
def populate_scene(self):
"""Clear the scene and populate it with assets."""
self.stage.RemovePrim("/World/Asset")
self.assets = []
num_assets = random.randint(*self.range_num_assets)
for i in range(num_assets):
category = random.choice(list(self.references.keys()))
ref = random.choice(self.references[category])
self.assets.append(self.load_single_asset(ref, category, i))
def randomize_asset_material(self):
"""Ranomize asset material properties"""
for asset in self.assets:
colour = (random.random(), random.random(), random.random())
# Here we choose not to have materials unrealistically rough or reflective.
roughness = random.uniform(0.1, 0.9)
# Here we choose to have more metallic than non-metallic objects.
metallic = random.choices([0.0, 1.0], weights=(0.8, 0.2))[0]
self._add_preview_surface(asset, colour, roughness, metallic)
def randomize_camera(self):
"""Randomize the camera position."""
        # By simply rotating a camera "rig" instead of repositioning the camera
        # itself, we greatly simplify our job.
# Clear previous transforms
self.camera_rig.ClearXformOpOrder()
# Change azimuth angle
self.camera_rig.AddRotateYOp().Set(random.random() * 360)
# Change elevation angle
self.camera_rig.AddRotateXOp().Set(random.random() * -90)
def __iter__(self):
return self
def __next__(self):
# Generate a new scene
self.populate_scene()
self.randomize_camera()
self.randomize_asset_material()
# step once and then wait for materials to load
self.kit.update()
print("waiting for materials to load...")
while self.kit.is_loading():
self.kit.update()
print("done")
self.kit.update()
# Collect Groundtruth
gt = self.sd_helper.get_groundtruth(["rgb", "boundingBox2DTight", "instanceSegmentation"], self.viewport)
# RGB
# Drop alpha channel
image = gt["rgb"][..., :3]
# Cast to tensor if numpy array
if isinstance(gt["rgb"], np.ndarray):
image = torch.tensor(image, dtype=torch.float, device="cuda")
# Normalize between 0. and 1. and change order to channel-first.
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# Bounding Box
gt_bbox = gt["boundingBox2DTight"]
# Create mapping from categories to index
mapping = {cat: i + 1 for i, cat in enumerate(self.categories)}
bboxes = torch.tensor(gt_bbox[["x_min", "y_min", "x_max", "y_max"]].tolist())
# For each bounding box, map semantic label to label index
labels = torch.LongTensor([mapping[bb["semanticLabel"]] for bb in gt_bbox])
        # Calculate the area of each bounding box
areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
        # Identify invalid bounding boxes to filter the final output
valid_areas = (areas > 0.0) * (areas < (image.shape[1] * image.shape[2]))
# Instance Segmentation
instance_data, instance_mappings = gt["instanceSegmentation"][0], gt["instanceSegmentation"][1]
instance_list = [im[0] for im in gt_bbox]
        masks = np.zeros((len(instance_list), *instance_data.shape), dtype=bool)
for i, instances in enumerate(instance_list):
masks[i] = np.isin(instance_data, instances)
if isinstance(masks, np.ndarray):
masks = torch.tensor(masks, device="cuda")
target = {
"boxes": bboxes[valid_areas],
"labels": labels[valid_areas],
"masks": masks[valid_areas],
"image_id": torch.LongTensor([self.cur_idx]),
"area": areas[valid_areas],
"iscrowd": torch.BoolTensor([False] * len(bboxes[valid_areas])), # Assume no crowds
}
self.cur_idx += 1
return image, target
if __name__ == "__main__":
"Typical usage"
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument("--categories", type=str, nargs="+", required=True, help="List of object classes to use")
parser.add_argument(
"--max-asset-size",
type=float,
default=10.0,
help="Maximum asset size to use in MB. Larger assets will be skipped.",
)
parser.add_argument(
"--root",
type=str,
default=None,
help="Root directory containing USDs. If not specified, use {SHAPENET_LOCAL_DIR}_nomat as root.",
)
args = parser.parse_args()
# If root is not specified use the environment variable SHAPENET_LOCAL_DIR with the _nomat suffix as root
if args.root is None:
args.root = f"{os.path.abspath(os.environ['SHAPENET_LOCAL_DIR'])}_nomat"
dataset = RandomObjects(args.root, args.categories, max_asset_size=args.max_asset_size)
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.synthetic_utils import shapenet
categories = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in args.categories]
# Iterate through dataset and visualize the output
plt.ion()
_, axes = plt.subplots(1, 2, figsize=(10, 5))
plt.tight_layout()
for image, target in dataset:
for ax in axes:
ax.clear()
ax.axis("off")
np_image = image.permute(1, 2, 0).cpu().numpy()
axes[0].imshow(np_image)
num_instances = len(target["boxes"])
colours = vis.random_colours(num_instances, enable_random=False)
overlay = np.zeros_like(np_image)
for mask, colour in zip(target["masks"].cpu().numpy(), colours):
overlay[mask, :3] = colour
axes[1].imshow(overlay)
mapping = {i + 1: cat for i, cat in enumerate(categories)}
labels = [shapenet.SYNSET_TO_LABEL[mapping[label.item()]] for label in target["labels"]]
vis.plot_boxes(ax, target["boxes"].tolist(), labels=labels, colours=colours)
plt.draw()
plt.savefig("dataset.png")
if dataset.exiting:
break
# cleanup
dataset.kit.shutdown()
| 15,242 | Python | 40.308943 | 120 | 0.63056 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/online_generation/segmentation/train.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Instance Segmentation Training Demonstration
Use a PyTorch dataloader together with OmniKit to generate scenes and groundtruth to
train a [Mask-RCNN](https://arxiv.org/abs/1703.06870) model.
"""
import os
import torch
from torch.utils.data import DataLoader
import torchvision
import matplotlib.pyplot as plt
import numpy as np
import signal
from dataset import RandomObjects
def main(args):
device = "cuda"
# Setup data
train_set = RandomObjects(
args.root, args.categories, num_assets_min=3, num_assets_max=5, max_asset_size=args.max_asset_size
)
train_loader = DataLoader(train_set, batch_size=2, collate_fn=lambda x: tuple(zip(*x)))
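    # The custom collate_fn keeps images and targets as tuples rather than stacking them into
    # one tensor, since each sample contains a different number of instances (boxes/masks).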
def handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
train_set.exiting = True
signal.signal(signal.SIGINT, handle_exit)
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.synthetic_utils import shapenet
# Setup Model
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, num_classes=1 + len(args.categories))
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
if args.visualize:
plt.ion()
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
for i, train_batch in enumerate(train_loader):
if i > args.max_iters or train_set.exiting:
print("Exiting ...")
train_set.kit.shutdown()
break
model.train()
images, targets = train_batch
images = [i.to(device) for i in images]
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
loss = sum(loss for loss in loss_dict.values())
print(f"ITER {i} | {loss:.6f}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
model.eval()
with torch.no_grad():
predictions = model(images[:1])
if args.visualize:
idx = 0
score_thresh = 0.5
mask_thresh = 0.5
pred = predictions[idx]
np_image = images[idx].permute(1, 2, 0).cpu().numpy()
for ax in axes:
fig.suptitle(f"Iteration {i:05}", fontsize=14)
ax.cla()
ax.axis("off")
ax.imshow(np_image)
axes[0].set_title("Input")
axes[1].set_title("Input + Predictions")
score_filter = [i for i in range(len(pred["scores"])) if pred["scores"][i] > score_thresh]
num_instances = len(score_filter)
colours = vis.random_colours(num_instances, enable_random=False)
overlay = np.zeros_like(np_image)
for mask, colour in zip(pred["masks"], colours):
overlay[mask.squeeze().cpu().numpy() > mask_thresh, :3] = colour
axes[1].imshow(overlay, alpha=0.5)
# If ShapeNet categories are specified with their names, convert to synset ID
# Remove this if using with a different dataset than ShapeNet
args.categories = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in args.categories]
mapping = {i + 1: cat for i, cat in enumerate(args.categories)}
labels = [shapenet.SYNSET_TO_LABEL[mapping[label.item()]] for label in pred["labels"]]
vis.plot_boxes(axes[1], pred["boxes"], labels=labels, colours=colours)
plt.draw()
plt.savefig("train.png")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument(
"--root",
type=str,
default=None,
help="Root directory containing ShapeNet USDs. If not specified, use {SHAPENET_LOCAL_DIR}_nomat as root.",
)
parser.add_argument(
"--categories", type=str, nargs="+", required=True, help="List of ShapeNet categories to use (space seperated)."
)
parser.add_argument(
"--max-asset-size",
type=float,
default=10.0,
help="Maximum asset size to use in MB. Larger assets will be skipped.",
)
parser.add_argument("-lr", "--learning_rate", type=float, default=1e-4, help="Learning rate")
parser.add_argument("--max-iters", type=float, default=1000, help="Number of training iterations.")
parser.add_argument("--visualize", action="store_true", help="Visualize predicted masks during training.")
args = parser.parse_args()
# If root is not specified use the environment variable SHAPENET_LOCAL_DIR with the _nomat suffix as root
if args.root is None:
args.root = f"{os.path.abspath(os.environ['SHAPENET_LOCAL_DIR'])}_nomat"
main(args)
| 5,320 | Python | 35.951389 | 120 | 0.617105 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/basic/visualize_groundtruth.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Demonstration of using OmniKit to generate a scene, collect groundtruth and visualize
the results.
"""
import copy
import os
import omni
import random
import numpy as np
from omni.isaac.python_app import OmniKitHelper
import matplotlib.pyplot as plt
TRANSLATION_RANGE = 300.0
SCALE = 50.0
def main():
kit = OmniKitHelper(
{"renderer": "RayTracedLighting", "experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit'}
)
from pxr import UsdGeom, Semantics
from omni.isaac.synthetic_utils import SyntheticDataHelper
sd_helper = SyntheticDataHelper()
from omni.syntheticdata import visualize, helpers
# SCENE SETUP
# Get the current stage
stage = kit.get_stage()
# Add a distant light
stage.DefinePrim("/World/Light", "DistantLight")
# Create 10 randomly positioned and coloured spheres and cube
# We will assign each a semantic label based on their shape (sphere/cube)
for i in range(10):
prim_type = random.choice(["Cube", "Sphere"])
prim = stage.DefinePrim(f"/World/cube{i}", prim_type)
translation = np.random.rand(3) * TRANSLATION_RANGE
UsdGeom.XformCommonAPI(prim).SetTranslate(translation.tolist())
UsdGeom.XformCommonAPI(prim).SetScale((SCALE, SCALE, SCALE))
prim.GetAttribute("primvars:displayColor").Set([np.random.rand(3).tolist()])
# Add semantic label based on prim type
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(prim_type)
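        # These class labels ("Cube"/"Sphere") are what the semantic segmentation and bounding
        # box groundtruth queried below report for each prim.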
# Get groundtruth
kit.update()
viewport = omni.kit.viewport.get_default_viewport_window()
gt = sd_helper.get_groundtruth(
[
"rgb",
"depth",
"boundingBox2DTight",
"boundingBox2DLoose",
"instanceSegmentation",
"semanticSegmentation",
"boundingBox3D",
],
viewport,
)
# GROUNDTRUTH VISUALIZATION
# Setup a figure
_, axes = plt.subplots(2, 4, figsize=(20, 7))
axes = axes.flat
for ax in axes:
ax.axis("off")
# RGB
axes[0].set_title("RGB")
for ax in axes[:-1]:
ax.imshow(gt["rgb"])
# DEPTH
axes[1].set_title("Depth")
depth_data = np.clip(gt["depth"], 0, 255)
axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))
# BBOX2D TIGHT
axes[2].set_title("BBox 2D Tight")
rgb_data = copy.deepcopy(gt["rgb"])
axes[2].imshow(visualize.colorize_bboxes(gt["boundingBox2DTight"], rgb_data))
# BBOX2D LOOSE
axes[3].set_title("BBox 2D Loose")
rgb_data = copy.deepcopy(gt["rgb"])
axes[3].imshow(visualize.colorize_bboxes(gt["boundingBox2DLoose"], rgb_data))
# INSTANCE SEGMENTATION
axes[4].set_title("Instance Segmentation")
instance_seg = gt["instanceSegmentation"][0]
instance_rgb = visualize.colorize_segmentation(instance_seg)
axes[4].imshow(instance_rgb, alpha=0.7)
# SEMANTIC SEGMENTATION
axes[5].set_title("Semantic Segmentation")
semantic_seg = gt["semanticSegmentation"]
semantic_rgb = visualize.colorize_segmentation(semantic_seg)
axes[5].imshow(semantic_rgb, alpha=0.7)
# BBOX 3D
axes[6].set_title("BBox 3D")
bbox_3d_data = gt["boundingBox3D"]
bboxes_3d_corners = bbox_3d_data["corners"]
projected_corners = helpers.world_to_image(bboxes_3d_corners.reshape(-1, 3), viewport)
projected_corners = projected_corners.reshape(-1, 8, 3)
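    # Each 3D box has 8 corners, so the projected corner list is reshaped back to
    # (num_boxes, 8, 3) before drawing.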
rgb_data = copy.deepcopy(gt["rgb"])
bboxes3D_rgb = visualize.colorize_bboxes_3d(projected_corners, rgb_data)
axes[6].imshow(bboxes3D_rgb)
# Save figure
print("saving figure to: ", os.getcwd() + "/visualize_groundtruth.png")
plt.savefig("visualize_groundtruth.png")
# cleanup
kit.shutdown()
if __name__ == "__main__":
main()
| 4,370 | Python | 31.139706 | 110 | 0.670252 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/offline_generation/generator_stereo.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate offline synthetic dataset using two cameras
"""
import asyncio
import copy
import numpy as np
import os
import random
import torch
import signal
import carb
import omni
from omni.isaac.python_app import OmniKitHelper
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "RayTracedLighting",
"samples_per_pixel_per_frame": 12,
"headless": False,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
class RandomScenario(torch.utils.data.IterableDataset):
def __init__(self, scenario_path, max_queue_size):
self.kit = OmniKitHelper(config=RENDER_CONFIG)
from omni.isaac.synthetic_utils import SyntheticDataHelper, DataWriter, DomainRandomization
self.sd_helper = SyntheticDataHelper()
self.dr_helper = DomainRandomization()
self.writer_helper = DataWriter
self.dr_helper.toggle_manual_mode()
self.stage = self.kit.get_stage()
self.result = True
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
if scenario_path is None:
self.result, nucleus_server = find_nucleus_server()
if self.result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
scenario_path = self.asset_path + "/Samples/Synthetic_Data/Stage/warehouse_with_sensors.usd"
self.scenario_path = scenario_path
self.max_queue_size = max_queue_size
self.data_writer = None
self._setup_world(scenario_path)
self.cur_idx = 0
self.exiting = False
self._viewport = omni.kit.viewport.get_viewport_interface()
self._sensor_settings = {}
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
def add_stereo_setup(self):
from pxr import Gf, UsdGeom
stage = omni.usd.get_context().get_stage()
# Create two camera
center_point = Gf.Vec3d(0, 0, 200)
stereoPrimPath = "/World/Stereo"
leftCameraPrimPath = stereoPrimPath + "/LeftCamera"
rightCameraPrimPath = stereoPrimPath + "/RightCamera"
self.stereoPrim = stage.DefinePrim(stereoPrimPath, "Xform")
UsdGeom.XformCommonAPI(self.stereoPrim).SetTranslate(center_point)
leftCameraPrim = stage.DefinePrim(leftCameraPrimPath, "Camera")
UsdGeom.XformCommonAPI(leftCameraPrim).SetTranslate(Gf.Vec3d(0, -10, 0))
UsdGeom.XformCommonAPI(leftCameraPrim).SetRotate(Gf.Vec3f(90, 0, 90))
rightCameraPrim = stage.DefinePrim(rightCameraPrimPath, "Camera")
UsdGeom.XformCommonAPI(rightCameraPrim).SetTranslate(Gf.Vec3d(0, 10, 0))
UsdGeom.XformCommonAPI(rightCameraPrim).SetRotate(Gf.Vec3f(90, 0, 90))
# Need to set this before setting viewport window size
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/width", -1)
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/height", -1)
# Get existing viewport, set active camera as left camera
viewport_handle_1 = omni.kit.viewport.get_viewport_interface().get_instance("Viewport")
viewport_window_1 = omni.kit.viewport.get_viewport_interface().get_viewport_window(viewport_handle_1)
viewport_window_1.set_texture_resolution(1280, 720)
viewport_window_1.set_active_camera(leftCameraPrimPath)
# Create new viewport, set active camera as right camera
viewport_handle_2 = omni.kit.viewport.get_viewport_interface().create_instance()
viewport_window_2 = omni.kit.viewport.get_viewport_interface().get_viewport_window(viewport_handle_2)
viewport_window_2.set_active_camera("/World/Stereo/RightCamera")
viewport_window_2.set_texture_resolution(1280, 720)
viewport_window_2.set_window_pos(720, 0)
viewport_window_2.set_window_size(720, 890)
# Setup stereo camera movement randomization
radius = 100
target_points_list = []
for theta in range(200, 300):
th = theta * np.pi / 180
x = radius * np.cos(th) + center_point[0]
y = radius * np.sin(th) + center_point[1]
target_points_list.append(Gf.Vec3f(x, y, center_point[2]))
lookat_target_points_list = [a for a in target_points_list[1:]]
lookat_target_points_list.append(target_points_list[0])
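        # Each waypoint on the radius-100 arc (theta 200-300 degrees around center_point) looks
        # at the next waypoint, wrapping back to the first, so the rig sweeps the scene smoothly
        # when sequential behavior is enabled below.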
result, prim = omni.kit.commands.execute(
"CreateTransformComponentCommand",
prim_paths=[stereoPrimPath],
target_points=target_points_list,
lookat_target_points=lookat_target_points_list,
enable_sequential_behavior=True,
)
async def load_stage(self, path):
await omni.usd.get_context().open_stage_async(path)
def _setup_world(self, scenario_path):
# Load scenario
setup_task = asyncio.ensure_future(self.load_stage(scenario_path))
while not setup_task.done():
self.kit.update()
self.add_stereo_setup()
self.kit.update()
self.kit.setup_renderer()
self.kit.update()
def __iter__(self):
return self
def __next__(self):
# step once and then wait for materials to load
self.dr_helper.randomize_once()
self.kit.update()
while self.kit.is_loading():
self.kit.update()
# Enable/disable sensor output and their format
sensor_settings_viewport_1 = {
"rgb": {"enabled": True},
"depth": {"enabled": True, "colorize": True, "npy": True},
"instance": {"enabled": True, "colorize": True, "npy": True},
"semantic": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True},
}
sensor_settings_viewport_2 = {
"rgb": {"enabled": True},
"depth": {"enabled": True, "colorize": True, "npy": True},
"instance": {"enabled": True, "colorize": True, "npy": True},
"semantic": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True},
}
viewports = self._viewport.get_instance_list()
self._viewport_names = [self._viewport.get_viewport_window_name(vp) for vp in viewports]
# Make sure two viewports are initialized
if len(self._viewport_names) != 2:
return
self._sensor_settings[self._viewport_names[0]] = copy.deepcopy(sensor_settings_viewport_1)
self._sensor_settings[self._viewport_names[1]] = copy.deepcopy(sensor_settings_viewport_2)
self._num_worker_threads = 4
self._output_folder = os.getcwd() + "/output"
# Write to disk
if self.data_writer is None:
self.data_writer = self.writer_helper(
self._output_folder, self._num_worker_threads, self.max_queue_size, self._sensor_settings
)
self.data_writer.start_threads()
image = None
for viewport_name in self._viewport_names:
groundtruth = {
"METADATA": {
"image_id": str(self.cur_idx),
"viewport_name": viewport_name,
"DEPTH": {},
"INSTANCE": {},
"SEMANTIC": {},
"BBOX2DTIGHT": {},
"BBOX2DLOOSE": {},
},
"DATA": {},
}
gt_list = []
if self._sensor_settings[viewport_name]["rgb"]["enabled"]:
gt_list.append("rgb")
if self._sensor_settings[viewport_name]["depth"]["enabled"]:
gt_list.append("depthLinear")
if self._sensor_settings[viewport_name]["bbox_2d_tight"]["enabled"]:
gt_list.append("boundingBox2DTight")
if self._sensor_settings[viewport_name]["bbox_2d_loose"]["enabled"]:
gt_list.append("boundingBox2DLoose")
if self._sensor_settings[viewport_name]["instance"]["enabled"]:
gt_list.append("instanceSegmentation")
if self._sensor_settings[viewport_name]["semantic"]["enabled"]:
gt_list.append("semanticSegmentation")
# Render new frame
self.kit.update()
# Collect Groundtruth
viewport = self._viewport.get_viewport_window(self._viewport.get_instance(viewport_name))
gt = self.sd_helper.get_groundtruth(gt_list, viewport)
# RGB
image = gt["rgb"]
if self._sensor_settings[viewport_name]["rgb"]["enabled"] and gt["state"]["rgb"]:
groundtruth["DATA"]["RGB"] = gt["rgb"]
# Depth
if self._sensor_settings[viewport_name]["depth"]["enabled"] and gt["state"]["depthLinear"]:
groundtruth["DATA"]["DEPTH"] = gt["depthLinear"].squeeze()
groundtruth["METADATA"]["DEPTH"]["COLORIZE"] = self._sensor_settings[viewport_name]["depth"]["colorize"]
groundtruth["METADATA"]["DEPTH"]["NPY"] = self._sensor_settings[viewport_name]["depth"]["npy"]
# Instance Segmentation
if self._sensor_settings[viewport_name]["instance"]["enabled"] and gt["state"]["instanceSegmentation"]:
instance_data = gt["instanceSegmentation"][0]
groundtruth["DATA"]["INSTANCE"] = instance_data
groundtruth["METADATA"]["INSTANCE"]["WIDTH"] = instance_data.shape[1]
groundtruth["METADATA"]["INSTANCE"]["HEIGHT"] = instance_data.shape[0]
groundtruth["METADATA"]["INSTANCE"]["COLORIZE"] = self._sensor_settings[viewport_name]["instance"][
"colorize"
]
groundtruth["METADATA"]["INSTANCE"]["NPY"] = self._sensor_settings[viewport_name]["instance"]["npy"]
# Semantic Segmentation
if self._sensor_settings[viewport_name]["semantic"]["enabled"] and gt["state"]["semanticSegmentation"]:
semantic_data = gt["semanticSegmentation"]
semantic_data[semantic_data == 65535] = 0 # deals with invalid semantic id
groundtruth["DATA"]["SEMANTIC"] = semantic_data
groundtruth["METADATA"]["SEMANTIC"]["WIDTH"] = semantic_data.shape[1]
groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"] = semantic_data.shape[0]
groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"] = self._sensor_settings[viewport_name]["semantic"][
"colorize"
]
groundtruth["METADATA"]["SEMANTIC"]["NPY"] = self._sensor_settings[viewport_name]["semantic"]["npy"]
# 2D Tight BBox
if self._sensor_settings[viewport_name]["bbox_2d_tight"]["enabled"] and gt["state"]["boundingBox2DTight"]:
groundtruth["DATA"]["BBOX2DTIGHT"] = gt["boundingBox2DTight"]
groundtruth["METADATA"]["BBOX2DTIGHT"]["COLORIZE"] = self._sensor_settings[viewport_name][
"bbox_2d_tight"
]["colorize"]
groundtruth["METADATA"]["BBOX2DTIGHT"]["NPY"] = self._sensor_settings[viewport_name]["bbox_2d_tight"][
"npy"
]
# 2D Loose BBox
if self._sensor_settings[viewport_name]["bbox_2d_loose"]["enabled"] and gt["state"]["boundingBox2DLoose"]:
groundtruth["DATA"]["BBOX2DLOOSE"] = gt["boundingBox2DLoose"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["COLORIZE"] = self._sensor_settings[viewport_name][
"bbox_2d_loose"
]["colorize"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["NPY"] = self._sensor_settings[viewport_name]["bbox_2d_loose"][
"npy"
]
self.data_writer.q.put(groundtruth)
self.cur_idx += 1
return image
if __name__ == "__main__":
"Typical usage"
import argparse
parser = argparse.ArgumentParser("Stereo dataset generator")
parser.add_argument("--scenario", type=str, help="Scenario to load from omniverse server")
parser.add_argument("--num_frames", type=int, default=30, help="Number of frames to record")
parser.add_argument("--max_queue_size", type=int, default=500, help="Max size of queue to store and process data")
args = parser.parse_args()
dataset = RandomScenario(args.scenario, args.max_queue_size)
if dataset.result:
# Iterate through dataset and visualize the output
print("Loading materials. Will generate data soon...")
for image in dataset:
print("ID: ", dataset.cur_idx)
if dataset.cur_idx == args.num_frames:
break
if dataset.exiting:
break
# cleanup
dataset.kit.shutdown()
| 13,669 | Python | 44.415282 | 120 | 0.603117 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/offline_generation/generator.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate offline synthetic dataset
"""
import asyncio
import copy
import os
import torch
import signal
import carb
import omni
from omni.isaac.python_app import OmniKitHelper
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "RayTracedLighting",
"samples_per_pixel_per_frame": 12,
"headless": True,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1024,
"height": 800,
}
class RandomScenario(torch.utils.data.IterableDataset):
def __init__(self, scenario_path, writer_mode, data_dir, max_queue_size, train_size, classes):
self.kit = OmniKitHelper(config=RENDER_CONFIG)
from omni.isaac.synthetic_utils import SyntheticDataHelper, DataWriter, KittiWriter, DomainRandomization
self.sd_helper = SyntheticDataHelper()
self.dr_helper = DomainRandomization()
self.writer_mode = writer_mode
self.writer_helper = KittiWriter if writer_mode == "kitti" else DataWriter
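        # "kitti" mode writes KITTI-style image/label folders split according to train_size and
        # classes, while the default DataWriter dumps per-sensor images and .npy arrays per frame.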
self.dr_helper.toggle_manual_mode()
self.stage = self.kit.get_stage()
self.result = True
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
if scenario_path is None:
self.result, nucleus_server = find_nucleus_server()
if self.result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
scenario_path = self.asset_path + "/Samples/Synthetic_Data/Stage/warehouse_with_sensors.usd"
self.scenario_path = scenario_path
self.max_queue_size = max_queue_size
self.data_writer = None
self.data_dir = data_dir
self.train_size = train_size
self.classes = classes
self._setup_world(scenario_path)
self.cur_idx = 0
self.exiting = False
self._sensor_settings = {}
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
async def load_stage(self, path):
await omni.usd.get_context().open_stage_async(path)
def _setup_world(self, scenario_path):
# Load scenario
setup_task = asyncio.ensure_future(self.load_stage(scenario_path))
while not setup_task.done():
self.kit.update()
self.kit.setup_renderer()
self.kit.update()
def __iter__(self):
return self
def __next__(self):
# step once and then wait for materials to load
self.dr_helper.randomize_once()
self.kit.update()
while self.kit.is_loading():
self.kit.update()
# Enable/disable sensor output and their format
self._enable_rgb = True
self._enable_depth = True
self._enable_instance = True
self._enable_semantic = True
self._enable_bbox_2d_tight = True
self._enable_bbox_2d_loose = True
self._enable_depth_colorize = True
self._enable_instance_colorize = True
self._enable_semantic_colorize = True
self._enable_bbox_2d_tight_colorize = True
self._enable_bbox_2d_loose_colorize = True
self._enable_depth_npy = True
self._enable_instance_npy = True
self._enable_semantic_npy = True
self._enable_bbox_2d_tight_npy = True
self._enable_bbox_2d_loose_npy = True
self._num_worker_threads = 4
self._output_folder = self.data_dir
sensor_settings_viewport = {
"rgb": {"enabled": self._enable_rgb},
"depth": {
"enabled": self._enable_depth,
"colorize": self._enable_depth_colorize,
"npy": self._enable_depth_npy,
},
"instance": {
"enabled": self._enable_instance,
"colorize": self._enable_instance_colorize,
"npy": self._enable_instance_npy,
},
"semantic": {
"enabled": self._enable_semantic,
"colorize": self._enable_semantic_colorize,
"npy": self._enable_semantic_npy,
},
"bbox_2d_tight": {
"enabled": self._enable_bbox_2d_tight,
"colorize": self._enable_bbox_2d_tight_colorize,
"npy": self._enable_bbox_2d_tight_npy,
},
"bbox_2d_loose": {
"enabled": self._enable_bbox_2d_loose,
"colorize": self._enable_bbox_2d_loose_colorize,
"npy": self._enable_bbox_2d_loose_npy,
},
}
self._sensor_settings["Viewport"] = copy.deepcopy(sensor_settings_viewport)
# Write to disk
if self.data_writer is None:
print(f"Writing data to {self._output_folder}")
if self.writer_mode == "kitti":
self.data_writer = self.writer_helper(
self._output_folder, self._num_worker_threads, self.max_queue_size, self.train_size, self.classes
)
else:
self.data_writer = self.writer_helper(
self._output_folder, self._num_worker_threads, self.max_queue_size, self._sensor_settings
)
self.data_writer.start_threads()
viewport_iface = omni.kit.viewport.get_viewport_interface()
viewport_name = "Viewport"
viewport = viewport_iface.get_viewport_window(viewport_iface.get_instance(viewport_name))
groundtruth = {
"METADATA": {
"image_id": str(self.cur_idx),
"viewport_name": viewport_name,
"DEPTH": {},
"INSTANCE": {},
"SEMANTIC": {},
"BBOX2DTIGHT": {},
"BBOX2DLOOSE": {},
},
"DATA": {},
}
gt_list = []
if self._enable_rgb:
gt_list.append("rgb")
if self._enable_depth:
gt_list.append("depthLinear")
if self._enable_bbox_2d_tight:
gt_list.append("boundingBox2DTight")
if self._enable_bbox_2d_loose:
gt_list.append("boundingBox2DLoose")
if self._enable_instance:
gt_list.append("instanceSegmentation")
if self._enable_semantic:
gt_list.append("semanticSegmentation")
# Render new frame
self.kit.update()
# Collect Groundtruth
gt = self.sd_helper.get_groundtruth(gt_list, viewport)
# RGB
image = gt["rgb"]
if self._enable_rgb:
groundtruth["DATA"]["RGB"] = gt["rgb"]
# Depth
if self._enable_depth:
groundtruth["DATA"]["DEPTH"] = gt["depthLinear"].squeeze()
groundtruth["METADATA"]["DEPTH"]["COLORIZE"] = self._enable_depth_colorize
groundtruth["METADATA"]["DEPTH"]["NPY"] = self._enable_depth_npy
# Instance Segmentation
if self._enable_instance:
instance_data = gt["instanceSegmentation"][0]
instance_data_shape = instance_data.shape
groundtruth["DATA"]["INSTANCE"] = instance_data
groundtruth["METADATA"]["INSTANCE"]["WIDTH"] = instance_data_shape[1]
groundtruth["METADATA"]["INSTANCE"]["HEIGHT"] = instance_data_shape[0]
groundtruth["METADATA"]["INSTANCE"]["COLORIZE"] = self._enable_instance_colorize
groundtruth["METADATA"]["INSTANCE"]["NPY"] = self._enable_instance_npy
# Semantic Segmentation
if self._enable_semantic:
semantic_data = gt["semanticSegmentation"]
semantic_data_shape = semantic_data.shape
groundtruth["DATA"]["SEMANTIC"] = semantic_data
groundtruth["METADATA"]["SEMANTIC"]["WIDTH"] = semantic_data_shape[1]
groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"] = semantic_data_shape[0]
groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"] = self._enable_semantic_colorize
groundtruth["METADATA"]["SEMANTIC"]["NPY"] = self._enable_semantic_npy
# 2D Tight BBox
if self._enable_bbox_2d_tight:
groundtruth["DATA"]["BBOX2DTIGHT"] = gt["boundingBox2DTight"]
groundtruth["METADATA"]["BBOX2DTIGHT"]["COLORIZE"] = self._enable_bbox_2d_tight_colorize
groundtruth["METADATA"]["BBOX2DTIGHT"]["NPY"] = self._enable_bbox_2d_tight_npy
# 2D Loose BBox
if self._enable_bbox_2d_loose:
groundtruth["DATA"]["BBOX2DLOOSE"] = gt["boundingBox2DLoose"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["COLORIZE"] = self._enable_bbox_2d_loose_colorize
groundtruth["METADATA"]["BBOX2DLOOSE"]["NPY"] = self._enable_bbox_2d_loose_npy
groundtruth["METADATA"]["BBOX2DLOOSE"]["WIDTH"] = RENDER_CONFIG["width"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["HEIGHT"] = RENDER_CONFIG["height"]
self.data_writer.q.put(groundtruth)
self.cur_idx += 1
return image
if __name__ == "__main__":
"Typical usage"
import argparse
parser = argparse.ArgumentParser("Dataset generator")
parser.add_argument("--scenario", type=str, help="Scenario to load from omniverse server")
parser.add_argument("--num_frames", type=int, default=10, help="Number of frames to record")
parser.add_argument("--writer_mode", type=str, default="npy", help="Specify output format - npy or kitti")
parser.add_argument(
"--data_dir", type=str, default=os.getcwd() + "/output", help="Location where data will be output"
)
parser.add_argument("--max_queue_size", type=int, default=500, help="Max size of queue to store and process data")
parser.add_argument(
"--train_size", type=int, default=8, help="Number of frames for training set, works when writer_mode is kitti"
)
parser.add_argument(
"--classes",
type=str,
nargs="+",
default=[],
help="Which classes to write labels for, works when writer_mode is kitti. Defaults to all classes",
)
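    # Example invocation (scenario path and class names below are illustrative):
    #   --scenario omniverse://localhost/Isaac/Samples/Synthetic_Data/Stage/warehouse_with_sensors.usd
    #   --num_frames 30 --writer_mode kitti --train_size 24 --classes ceiling floor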
args = parser.parse_args()
dataset = RandomScenario(
args.scenario, args.writer_mode, args.data_dir, args.max_queue_size, args.train_size, args.classes
)
if dataset.result:
# Iterate through dataset and visualize the output
print("Loading materials. Will generate data soon...")
for image in dataset:
print("ID: ", dataset.cur_idx)
if dataset.cur_idx == args.num_frames:
break
if dataset.exiting:
break
# cleanup
dataset.kit.shutdown()
| 11,119 | Python | 37.881119 | 118 | 0.595377 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/advanced/shapenet_usd_convertor.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import pprint
from omni.isaac.python_app import OmniKitHelper
"""Convert ShapeNetCore V2 to USD without materials.
By only converting the ShapeNet geometry, we can more quickly load assets into scenes for the purpose of creating
large datasets or for online training of Deep Learning models.
"""
if __name__ == "__main__":
RENDER_CONFIG = {"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit'}
kit = OmniKitHelper(config=RENDER_CONFIG)
import argparse
from omni.isaac.synthetic_utils import shapenet
parser = argparse.ArgumentParser("Convert ShapeNet assets to USD")
parser.add_argument(
"--categories",
type=str,
nargs="+",
default=None,
help="List of ShapeNet categories to convert (space seperated).",
)
parser.add_argument(
"--max-models", type=int, default=None, help="If specified, convert up to `max-models` per category."
)
parser.add_argument(
"--load-materials", action="store_true", help="If specified, materials will be loaded from shapenet meshes"
)
args = parser.parse_args()
# Ensure Omniverse Kit is launched via OmniKitHelper before shapenet_convert() is called
shapenet.shapenet_convert(args.categories, args.max_models, args.load_materials)
# cleanup
kit.shutdown()
| 1,763 | Python | 37.347825 | 115 | 0.723199 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/advanced/domain_randomization.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Dataset with online randomized scene generation for Instance Segmentation training.
Use OmniKit to generate a simple scene. At each iteration, the scene is populated by
adding assets from the user-specified classes with randomized pose and colour.
The camera position is also randomized before capturing groundtruth consisting of
an RGB rendered image, Tight 2D Bounding Boxes and Instance Segmentation masks.
"""
import os
import glob
import torch
import random
import numpy as np
import signal
import carb
import omni
from omni.isaac.python_app import OmniKitHelper
# Setup default generation variables
# Value are (min, max) ranges
RANDOM_TRANSLATION_X = (-30.0, 30.0)
RANDOM_TRANSLATION_Z = (-30.0, 30.0)
RANDOM_ROTATION_Y = (0.0, 360.0)
SCALE = 20
CAMERA_DISTANCE = 300
BBOX_AREA_THRESH = 16
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "PathTracing",
"samples_per_pixel_per_frame": 12,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
class RandomObjects(torch.utils.data.IterableDataset):
"""Dataset of random ShapeNet objects.
Objects are randomly chosen from selected categories and are positioned, rotated and coloured
randomly in an empty room. RGB, BoundingBox2DTight and Instance Segmentation are captured by moving a
camera aimed at the centre of the scene which is positioned at random at a fixed distance from the centre.
This dataset is intended for use with ShapeNet but will function with any dataset of USD models
    structured as `root/category/**/*.usd`. Note that this is designed for assets without materials
    attached, to avoid having to compile MDLs and load textures while training.
Args:
categories (tuple of str): Tuple or list of categories. For ShapeNet, these will be the synset IDs.
max_asset_size (int): Maximum asset file size that will be loaded. This prevents out of memory errors
due to loading large meshes.
num_assets_min (int): Minimum number of assets populated in the scene.
num_assets_max (int): Maximum number of assets populated in the scene.
split (float): Fraction of the USDs found to use for training.
train (bool): If true, use the first training split and generate infinite random scenes.
"""
def __init__(
self, root, categories, max_asset_size=None, num_assets_min=3, num_assets_max=5, split=0.7, train=True
):
assert len(categories) > 1
assert (split > 0) and (split <= 1.0)
self.kit = OmniKitHelper(config=RENDER_CONFIG)
from omni.isaac.synthetic_utils import SyntheticDataHelper, DomainRandomization
from omni.isaac.synthetic_utils import shapenet
self.sd_helper = SyntheticDataHelper()
self.dr_helper = DomainRandomization()
self.dr_helper.toggle_manual_mode()
self.stage = self.kit.get_stage()
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
# If ShapeNet categories are specified with their names, convert to synset ID
# Remove this if using with a different dataset than ShapeNet
category_ids = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in categories]
self.categories = category_ids
self.range_num_assets = (num_assets_min, max(num_assets_min, num_assets_max))
self.references = self._find_usd_assets(root, category_ids, max_asset_size, split, train)
self._setup_world()
self.cur_idx = 0
self.exiting = False
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
def _setup_world(self):
from pxr import UsdGeom
"""Setup lights, walls, floor, ceiling and camera"""
# In a practical setting, the room parameters should attempt to match those of the
        # target domain. Here, we instead choose simple values.
self.kit.create_prim(
"/World/Room", "Sphere", attributes={"radius": 1e3, "primvars:displayColor": [(1.0, 1.0, 1.0)]}
)
self.kit.create_prim(
"/World/Ground",
"Cylinder",
translation=(0.0, -0.5, 0.0),
rotation=(90.0, 0.0, 0.0),
attributes={"height": 1, "radius": 1e4, "primvars:displayColor": [(1.0, 1.0, 1.0)]},
)
self.kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(-450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (0.0, 0.365, 0.848)},
)
self.kit.create_prim(
"/World/Light2",
"SphereLight",
translation=(450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (1.0, 0.278, 0.0)},
)
self.kit.create_prim("/World/Asset", "Xform")
self.camera_rig = UsdGeom.Xformable(self.kit.create_prim("/World/CameraRig", "Xform"))
self.camera = self.kit.create_prim("/World/CameraRig/Camera", "Camera", translation=(0.0, 0.0, CAMERA_DISTANCE))
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera.GetPath()))
self.viewport = omni.kit.viewport.get_default_viewport_window()
self.kit.update()
# create DR components
self.create_dr_comp()
self.kit.update()
def _find_usd_assets(self, root, categories, max_asset_size, split, train=True):
"""Look for USD files under root/category for each category specified.
For each category, generate a list of all USD files found and select
        assets up to `split * num_assets` if `train=True`, otherwise select the
remainder.
"""
references = {}
for category in categories:
all_assets = glob.glob(os.path.join(root, category, "**/*.usd"), recursive=True)
            # Filter out large files (to prevent OOM errors during training)
if max_asset_size is None:
assets_filtered = all_assets
else:
assets_filtered = []
for a in all_assets:
if os.stat(a).st_size > max_asset_size * 1e6:
print(f"{a} skipped as it exceeded the max size {max_asset_size} MB.")
else:
assets_filtered.append(a)
num_assets = len(assets_filtered)
if num_assets == 0:
raise ValueError(f"No USDs found for category {category} under max size {max_asset_size} MB.")
if train:
references[category] = assets_filtered[: int(num_assets * split)]
else:
references[category] = assets_filtered[int(num_assets * split) :]
return references
def load_single_asset(self, ref, semantic_label, suffix=""):
from pxr import UsdGeom
"""Load a USD asset with random pose.
args
ref (str): Path to the USD that this prim will reference.
semantic_label (str): Semantic label.
suffix (str): String to add to the end of the prim's path.
"""
x = random.uniform(*RANDOM_TRANSLATION_X)
z = random.uniform(*RANDOM_TRANSLATION_Z)
rot_y = random.uniform(*RANDOM_ROTATION_Y)
asset = self.kit.create_prim(
f"/World/Asset/mesh{suffix}",
"Xform",
scale=(SCALE, SCALE, SCALE),
rotation=(0.0, rot_y, 0.0),
ref=ref,
semantic_label=semantic_label,
)
bound = UsdGeom.Mesh(asset).ComputeWorldBound(0.0, "default")
box_min_y = bound.GetBox().GetMin()[1]
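        # Offset vertically by the lowest point of the world-space bounds so the asset rests on the ground plane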
UsdGeom.XformCommonAPI(asset).SetTranslate((x, -box_min_y, z))
return asset
def populate_scene(self):
"""Clear the scene and populate it with assets."""
self.stage.RemovePrim("/World/Asset")
self.assets = []
num_assets = random.randint(*self.range_num_assets)
for i in range(num_assets):
category = random.choice(list(self.references.keys()))
ref = random.choice(self.references[category])
self.assets.append(self.load_single_asset(ref, category, i))
def randomize_camera(self):
"""Randomize the camera position."""
        # By simply rotating a camera "rig" instead of repositioning the camera
# itself, we greatly simplify our job.
# Clear previous transforms
self.camera_rig.ClearXformOpOrder()
# Change azimuth angle
self.camera_rig.AddRotateYOp().Set(random.random() * 360)
# Change elevation angle
self.camera_rig.AddRotateXOp().Set(random.random() * -90)
def create_dr_comp(self):
"""Creates DR components with various attributes.
The asset prims to randomize is an empty list for most components
since we get a new list of assets every iteration.
The asset list will be updated for each component in update_dr_comp()
"""
texture_list = [
self.asset_path + "/Samples/DR/Materials/Textures/checkered.png",
self.asset_path + "/Samples/DR/Materials/Textures/marble_tile.png",
self.asset_path + "/Samples/DR/Materials/Textures/picture_a.png",
self.asset_path + "/Samples/DR/Materials/Textures/picture_b.png",
self.asset_path + "/Samples/DR/Materials/Textures/textured_wall.png",
self.asset_path + "/Samples/DR/Materials/Textures/checkered_color.png",
]
material_list = [
self.asset_path + "/Samples/DR/Materials/checkered.mdl",
self.asset_path + "/Samples/DR/Materials/checkered_color.mdl",
self.asset_path + "/Samples/DR/Materials/marble_tile.mdl",
self.asset_path + "/Samples/DR/Materials/picture_a.mdl",
self.asset_path + "/Samples/DR/Materials/picture_b.mdl",
self.asset_path + "/Samples/DR/Materials/textured_wall.mdl",
]
light_list = ["World/Light1", "World/Light2"]
self.texture_comp = self.dr_helper.create_texture_comp([], True, texture_list)
self.color_comp = self.dr_helper.create_color_comp([])
self.material_comp = self.dr_helper.create_material_comp([], material_list)
self.movement_comp = self.dr_helper.create_movement_comp([])
self.rotation_comp = self.dr_helper.create_rotation_comp([])
self.scale_comp = self.dr_helper.create_scale_comp([], max_range=(50, 50, 50))
self.light_comp = self.dr_helper.create_light_comp(light_list)
self.visibility_comp = self.dr_helper.create_visibility_comp([])
def update_dr_comp(self, dr_comp):
"""Updates DR component with the asset prim paths that will be randomized"""
comp_prim_paths_target = dr_comp.GetPrimPathsRel()
comp_prim_paths_target.ClearTargets(True)
for asset in self.assets:
comp_prim_paths_target.AddTarget(asset.GetPrimPath())
def __iter__(self):
return self
def __next__(self):
# Generate a new scene
self.populate_scene()
self.randomize_camera()
"""The below update calls set the paths of prims that need to be randomized
with the settings provided in their corresponding DR create component
"""
# In this example, either update texture or color or material of assets
# self.update_dr_comp(self.color_comp)
self.update_dr_comp(self.texture_comp)
# self.update_dr_comp(self.material_comp)
# Also update movement, rotation and scale components
# self.update_dr_comp(self.movement_comp)
# self.update_dr_comp(self.rotation_comp)
self.update_dr_comp(self.scale_comp)
# randomize once
self.dr_helper.randomize_once()
# step once and then wait for materials to load
self.kit.update()
print("waiting for materials to load...")
while self.kit.is_loading():
self.kit.update()
print("done")
self.kit.update()
# Collect Groundtruth
gt = self.sd_helper.get_groundtruth(["rgb", "boundingBox2DTight", "instanceSegmentation"], self.viewport)
# RGB
# Drop alpha channel
image = gt["rgb"][..., :3]
# Cast to tensor if numpy array
if isinstance(gt["rgb"], np.ndarray):
image = torch.tensor(image, dtype=torch.float, device="cuda")
# Normalize between 0. and 1. and change order to channel-first.
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# Bounding Box
gt_bbox = gt["boundingBox2DTight"]
# Create mapping from categories to index
mapping = {cat: i + 1 for i, cat in enumerate(self.categories)}
bboxes = torch.tensor(gt_bbox[["x_min", "y_min", "x_max", "y_max"]].tolist())
# For each bounding box, map semantic label to label index
labels = torch.LongTensor([mapping[bb["semanticLabel"]] for bb in gt_bbox])
        # Calculate the area of each bounding box
areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
        # Identify invalid bounding boxes to filter the final output
valid_areas = (areas > 0.0) * (areas < (image.shape[1] * image.shape[2]))
# Instance Segmentation
instance_data, instance_mappings = gt["instanceSegmentation"][0], gt["instanceSegmentation"][1]
instance_list = [im[0] for im in gt_bbox]
        masks = np.zeros((len(instance_list), *instance_data.shape), dtype=bool)
for i, instances in enumerate(instance_list):
masks[i] = np.isin(instance_data, instances)
if isinstance(masks, np.ndarray):
masks = torch.tensor(masks, device="cuda")
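        # Assemble the target dict in the format expected by torchvision detection models,
        # keeping only entries whose bounding boxes passed the validity check above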
target = {
"boxes": bboxes[valid_areas],
"labels": labels[valid_areas],
"masks": masks[valid_areas],
"image_id": torch.LongTensor([self.cur_idx]),
"area": areas[valid_areas],
"iscrowd": torch.BoolTensor([False] * len(bboxes[valid_areas])), # Assume no crowds
}
self.cur_idx += 1
return image, target
if __name__ == "__main__":
"Typical usage"
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument("--categories", type=str, nargs="+", required=True, help="List of object classes to use")
parser.add_argument(
"--max-asset-size",
type=float,
default=10.0,
help="Maximum asset size to use in MB. Larger assets will be skipped.",
)
parser.add_argument(
"--root",
type=str,
default=None,
help="Root directory containing USDs. If not specified, use {SHAPENET_LOCAL_DIR}_nomat as root.",
)
args = parser.parse_args()
# If root is not specified use the environment variable SHAPENET_LOCAL_DIR with the _nomat suffix as root
if args.root is None:
args.root = f"{os.path.abspath(os.environ['SHAPENET_LOCAL_DIR'])}_mat"
dataset = RandomObjects(args.root, args.categories, max_asset_size=args.max_asset_size)
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.synthetic_utils import shapenet
categories = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in args.categories]
# Iterate through dataset and visualize the output
plt.ion()
_, axes = plt.subplots(1, 2, figsize=(10, 5))
plt.tight_layout()
for image, target in dataset:
for ax in axes:
ax.clear()
ax.axis("off")
np_image = image.permute(1, 2, 0).cpu().numpy()
axes[0].imshow(np_image)
num_instances = len(target["boxes"])
colours = vis.random_colours(num_instances)
overlay = np.zeros_like(np_image)
for mask, colour in zip(target["masks"].cpu().numpy(), colours):
overlay[mask, :3] = colour
axes[1].imshow(overlay)
mapping = {i + 1: cat for i, cat in enumerate(categories)}
labels = [shapenet.SYNSET_TO_LABEL[mapping[label.item()]] for label in target["labels"]]
vis.plot_boxes(ax, target["boxes"].tolist(), labels=labels, colours=colours)
plt.draw()
plt.pause(0.01)
plt.savefig("domain_randomization.png")
if dataset.exiting:
break
# cleanup
dataset.kit.shutdown()
| 17,200 | Python | 41.056235 | 120 | 0.628953 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/advanced/visualize_groundtruth_physics.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Demonstration of using OmniKit to generate a scene, collect groundtruth and visualize
the results. This advanced sample also simulates physics and uses a custom glass material
"""
import copy
import os
import random
import numpy as np
from omni.isaac.python_app import OmniKitHelper
import matplotlib.pyplot as plt
TRANSLATION_RANGE = 300.0
SCALE = 50.0
# specify a custom config
CUSTOM_CONFIG = {
"width": 1024,
"height": 1024,
"renderer": "PathTracing",
"samples_per_pixel_per_frame": 128,
"max_bounces": 10,
"max_specular_transmission_bounces": 6,
"max_volume_bounces": 4,
"subdiv_refinement_level": 2,
"headless": True,
"sync_loads": True,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
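# Path tracing with a high sample count is used for the final capture so the glass material
# resolves cleanly; the cheaper RayTracedLighting mode is switched in while stepping physics below.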
def main():
kit = OmniKitHelper(CUSTOM_CONFIG)
from pxr import Gf, Sdf, UsdShade, UsdGeom, Semantics
from omni.isaac.synthetic_utils import SyntheticDataHelper
sd_helper = SyntheticDataHelper()
from omni.syntheticdata import visualize, helpers
from omni.physx.scripts import utils
from pxr import UsdPhysics, PhysxSchema, PhysicsSchemaTools
import omni
# SCENE SETUP
# Get the current stage
stage = kit.get_stage()
# Add a sphere light
kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(0, 200, 0),
attributes={"radius": 100, "intensity": 100000.0, "color": (1, 1, 1)},
)
# Add physics scene
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/World/physicsScene"))
# Set gravity vector
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/World/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/World/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Create a ground plane
PhysicsSchemaTools.addGroundPlane(stage, "/World/groundPlane", "Z", 1000, Gf.Vec3f(0, 0, -100), Gf.Vec3f(1.0))
    # Create 10 randomly positioned and coloured cubes, spheres and cylinders
    # We will assign each a semantic label based on their shape (cube/sphere/cylinder)
prims = []
for i in range(10):
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = stage.DefinePrim(f"/World/cube{i}", prim_type)
translation = np.random.rand(3) * TRANSLATION_RANGE
UsdGeom.XformCommonAPI(prim).SetTranslate(translation.tolist())
UsdGeom.XformCommonAPI(prim).SetScale((SCALE, SCALE, SCALE))
# prim.GetAttribute("primvars:displayColor").Set([np.random.rand(3).tolist()])
# Add semantic label based on prim type
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(prim_type)
# Add physics to prims
utils.setRigidBody(prim, "convexHull", False)
# Set Mass to 1 kg
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_api.CreateMassAttr(1)
# add prim reference to list
prims.append(prim)
# Apply glass material
for prim in prims:
# Create Glass material
mtl_created_list = []
kit.execute(
"CreateAndBindMdlMaterialFromLibrary",
mdl_name="OmniGlass.mdl",
mtl_name="OmniGlass",
mtl_created_list=mtl_created_list,
)
mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
# Set material inputs, these can be determined by looking at the .mdl file
# or by selecting the Shader attached to the Material in the stage window and looking at the details panel
color = Gf.Vec3f(random.random(), random.random(), random.random())
omni.usd.create_material_input(mtl_prim, "glass_color", color, Sdf.ValueTypeNames.Color3f)
omni.usd.create_material_input(mtl_prim, "glass_ior", 1.25, Sdf.ValueTypeNames.Float)
# This value is the volumetric light absorption scale, reduce to zero to make glass clearer
omni.usd.create_material_input(mtl_prim, "depth", 0.001, Sdf.ValueTypeNames.Float)
# Enable for thin glass objects if needed
omni.usd.create_material_input(mtl_prim, "thin_walled", False, Sdf.ValueTypeNames.Bool)
# Bind the material to the prim
prim_mat_shade = UsdShade.Material(mtl_prim)
UsdShade.MaterialBindingAPI(prim).Bind(prim_mat_shade, UsdShade.Tokens.strongerThanDescendants)
# force RayTracedLighting mode for better performance while simulating physics
kit.set_setting("/rtx/rendermode", "RayTracedLighting")
# start simulation
kit.play()
# Step simulation so that objects fall to rest
# wait until all materials are loaded
frame = 0
print("simulating physics...")
while frame < 60 or kit.is_loading():
kit.update(1 / 60.0)
frame = frame + 1
print("done")
# Return to user specified render mode
kit.set_setting("/rtx/rendermode", CUSTOM_CONFIG["renderer"])
print("capturing...")
# Get groundtruth using glass material
kit.update()
viewport = omni.kit.viewport.get_default_viewport_window()
gt = sd_helper.get_groundtruth(
[
"rgb",
"depth",
"boundingBox2DTight",
"boundingBox2DLoose",
"instanceSegmentation",
"semanticSegmentation",
"boundingBox3D",
],
viewport,
)
print("done")
# everything is captured, stop simulating
kit.stop()
print("visualize results")
# GROUNDTRUTH VISUALIZATION
# Setup a figure
_, axes = plt.subplots(2, 4, figsize=(20, 7))
axes = axes.flat
for ax in axes:
ax.axis("off")
# RGB
axes[0].set_title("RGB")
for ax in axes[:-1]:
ax.imshow(gt["rgb"])
# DEPTH
axes[1].set_title("Depth")
depth_data = np.clip(gt["depth"], 0, 255)
axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))
# BBOX2D TIGHT
axes[2].set_title("BBox 2D Tight")
rgb_data = copy.deepcopy(gt["rgb"])
axes[2].imshow(visualize.colorize_bboxes(gt["boundingBox2DTight"], rgb_data))
# BBOX2D LOOSE
axes[3].set_title("BBox 2D Loose")
rgb_data = copy.deepcopy(gt["rgb"])
axes[3].imshow(visualize.colorize_bboxes(gt["boundingBox2DLoose"], rgb_data))
# INSTANCE SEGMENTATION
axes[4].set_title("Instance Segmentation")
instance_seg = gt["instanceSegmentation"][0]
instance_rgb = visualize.colorize_segmentation(instance_seg)
axes[4].imshow(instance_rgb, alpha=0.7)
# SEMANTIC SEGMENTATION
axes[5].set_title("Semantic Segmentation")
semantic_seg = gt["semanticSegmentation"]
semantic_rgb = visualize.colorize_segmentation(semantic_seg)
axes[5].imshow(semantic_rgb, alpha=0.7)
# BBOX 3D
axes[6].set_title("BBox 3D")
bbox_3d_data = gt["boundingBox3D"]
bboxes_3d_corners = bbox_3d_data["corners"]
projected_corners = helpers.world_to_image(bboxes_3d_corners.reshape(-1, 3), viewport)
projected_corners = projected_corners.reshape(-1, 8, 3)
rgb_data = copy.deepcopy(gt["rgb"])
bboxes3D_rgb = visualize.colorize_bboxes_3d(projected_corners, rgb_data)
axes[6].imshow(bboxes3D_rgb)
# Save figure
print("saving figure to: ", os.getcwd() + "/visualize_groundtruth_physics.png")
plt.savefig("visualize_groundtruth_physics.png")
# cleanup
kit.shutdown()
if __name__ == "__main__":
main()
| 8,243 | Python | 35.31718 | 114 | 0.672449 |
KazWong/omniverse_sample/ov_sample/python_samples/isaac_sdk/load_stage.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import carb
import omni
# This sample loads a usd stage and creates a robot engine bridge application and starts simulation
# Reports the average FPS of the simulation over a given time
# Useful for testing an Isaac SDK sample scene using python
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"sync_loads": True,
"headless": False,
"renderer": "RayTracedLighting",
}
class UsdLoadSample:
def __init__(self, args):
CONFIG["headless"] = args.headless
self.kit = OmniKitHelper(config=CONFIG)
self.usd_path = ""
self._viewport = omni.kit.viewport.get_viewport_interface()
def start(self):
self.kit.play()
def stop(self):
self.kit.stop()
omni.kit.commands.execute("RobotEngineBridgeDestroyApplication")
self.kit.shutdown()
def load_stage(self, args):
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return False
self._asset_path = nucleus_server + "/Isaac"
self.usd_path = self._asset_path + args.usd_path
omni.usd.get_context().open_stage(self.usd_path, None)
# Wait two frames so that stage starts loading
self.kit.app.update()
self.kit.app.update()
return True
def configure_bridge(self, json_file: str = "isaacsim.app.json"):
"""
Configure the SDK bridge application that publishes data over tcp
"""
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_id = ext_manager.get_enabled_extension_id("omni.isaac.robot_engine_bridge")
reb_extension_path = ext_manager.get_extension_path(ext_id)
app_file = f"{reb_extension_path}/resources/isaac_engine/json/{json_file}"
carb.log_info(f"create application with: {reb_extension_path} {app_file}")
return omni.kit.commands.execute(
"RobotEngineBridgeCreateApplication", asset_path=reb_extension_path, app_file=app_file
)
def disable_existing_reb_cameras(self):
"""
Disable existing REB_Camera prims for perf testing
"""
import omni.isaac.RobotEngineBridgeSchema as REBSchema
stage = self.kit.get_stage()
for prim in stage.Traverse():
if prim.IsA(REBSchema.RobotEngineCamera):
reb_camera_prim = REBSchema.RobotEngineCamera(prim)
reb_camera_prim.GetEnabledAttr().Set(False)
def create_reb_camera(self, cameraIndex, name, width, height):
"""Create a new REB camera in the stage"""
from pxr import Gf
result, reb_camera_prim = omni.kit.commands.execute(
"RobotEngineBridgeCreateCamera",
path="/World/REB_Camera",
parent=None,
rgb_output_component="output",
rgb_output_channel="encoder_color_{}".format(cameraIndex),
depth_output_component="output",
depth_output_channel="encoder_depth_{}".format(cameraIndex),
segmentation_output_component="output",
segmentation_output_channel="encoder_segmentation_{}".format(cameraIndex),
bbox2d_output_component="output",
bbox2d_output_channel="encoder_bbox_{}".format(cameraIndex),
bbox2d_class_list="",
bbox3d_output_component="output",
bbox3d_output_channel="encoder_bbox3d_{}".format(cameraIndex),
bbox3d_class_list="",
rgb_enabled=True,
depth_enabled=False,
segmentaion_enabled=True,
bbox2d_enabled=False,
bbox3d_enabled=False,
camera_prim_rel=["{}".format(name)],
resolution=Gf.Vec2i(int(width), int(height)),
)
if __name__ == "__main__":
import argparse
import time
parser = argparse.ArgumentParser("Usd Load sample")
parser.add_argument("--usd_path", type=str, help="Path to usd file", required=True)
parser.add_argument("--headless", default=False, action="store_true", help="Run stage headless")
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
parser.add_argument("--benchmark", default=False, action="store_true", help="Run in benchmark mode")
parser.add_argument(
"--benchmark_timeout", type=int, default=60, help="Total walltime in seconds to calculate average FPS for"
)
parser.add_argument(
"--add_rebcamera",
nargs="*",
type=str,
default=[],
help="Total number of REB Camera prims to add, existing ones will be disabled if this option is specified",
)
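    # Example (illustrative camera prim path): --add_rebcamera /World/Camera,1280,720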
args, unknown = parser.parse_known_args()
sample = UsdLoadSample(args)
if sample.load_stage(args):
print("Loading stage...")
while sample.kit.is_loading():
sample.kit.update(1.0 / 60.0)
print("Loading Complete")
# Add parameterized rebcamera along with viewport
if args.add_rebcamera is not None and len(args.add_rebcamera) > 0:
# disable existing cameras if we are making new ones
sample.disable_existing_reb_cameras()
reb_count = 0
for name in args.add_rebcamera:
info = name.split(",")
sample.create_reb_camera(reb_count, info[0], info[1], info[2])
reb_count = reb_count + 1
sample.configure_bridge()
sample.start()
if args.test is True:
for i in range(10):
sample.kit.update()
sample.stop()
elif args.benchmark is True:
# Warm up simulation
while sample._viewport.get_viewport_window().get_fps() < 1:
sample.kit.update(1.0 / 60.0)
fps_count = 0
start_time = time.perf_counter()
end_time = start_time + args.benchmark_timeout
count = 0
# Calculate average fps
while sample.kit.app.is_running() and end_time > time.perf_counter():
sample.kit.update(1.0 / 60.0)
fps = sample._viewport.get_viewport_window().get_fps()
fps_count = fps_count + fps
count = count + 1
sample.stop()
print(f"\n----------- Avg. FPS over {args.benchmark_timeout} sec : {fps_count/count}-----------")
else:
while sample.kit.app.is_running():
# Run in realtime mode, we don't specify the step size
sample.kit.update()
sample.stop()
| 7,211 | Python | 39.977273 | 115 | 0.617667 |
KazWong/omniverse_sample/ov_sample/python_samples/isaac_sdk/pose_estimation.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import random
import os
import omni
from omni.isaac.python_app import OmniKitHelper
import carb.tokens
import argparse
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"sync_loads": True,
"headless": True,
"renderer": "RayTracedLighting",
}
# D435
FOCAL_LEN = 1.93
HORIZONTAL_APERTURE = 2.682
VERTICAL_APERTURE = 1.509
FOCUS_DIST = 400
RANDOMIZE_SCENE_EVERY_N_STEPS = 10
class DualCameraSample:
def __init__(self):
self.kit = OmniKitHelper(config=CONFIG)
import omni.physx
from pxr import UsdGeom, Usd, Gf
from omni.isaac.synthetic_utils import DomainRandomization
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.robot_engine_bridge import _robot_engine_bridge
self._re_bridge = _robot_engine_bridge.acquire_robot_engine_bridge_interface()
self._viewport = omni.kit.viewport.get_viewport_interface()
self.dr_helper = DomainRandomization()
self.sd_helper = SyntheticDataHelper()
self.frame = 0
self.Gf = Gf
self.UsdGeom = UsdGeom
self.Usd = Usd
def shutdown(self):
self.kit.shutdown()
def start(self):
self.kit.play()
def stop(self):
self.kit.stop()
omni.kit.commands.execute("RobotEngineBridgeDestroyApplication")
def create_stage(self):
# open base stage and set up axis to Z
stage = self.kit.get_stage()
rootLayer = stage.GetRootLayer()
rootLayer.SetPermissionToEdit(True)
with self.Usd.EditContext(stage, rootLayer):
self.UsdGeom.SetStageUpAxis(stage, self.UsdGeom.Tokens.z)
# make two prims, one for env and one for just the room
# this allows us to add other prims to environment for randomization and still hide them all at once
self._environment = stage.DefinePrim("/environment", "Xform")
self._room = stage.DefinePrim("/environment/room", "Xform")
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return False
self._asset_path = nucleus_server + "/Isaac"
stage_path = self._asset_path + "/Environments/Simple_Room/simple_room.usd"
self._room.GetReferences().AddReference(stage_path)
self._target_prim = self.kit.create_prim(
"/objects/cube", "Cube", translation=(0, 0, 100), scale=(10, 10, 50), semantic_label="target"
)
# make sure that we wait for the stage to load
self.kit.app.update()
self.kit.app.update()
return True
def create_camera(self):
self._camera = self.kit.create_prim(
"/World/Camera",
"Camera",
translation=(0.0, 0.0, 0.0),
attributes={
"focusDistance": FOCUS_DIST,
"focalLength": FOCAL_LEN,
"horizontalAperture": HORIZONTAL_APERTURE,
"verticalAperture": VERTICAL_APERTURE,
},
)
# activate new camera
self._viewport.get_viewport_window().set_active_camera(str(self._camera.GetPath()))
# the camera reference frame between sdk and sim seems to be flipped 180 on x
# this prim acts as a proxy to do that coordinate transformation
self._camera_proxy = self.kit.create_prim("/World/Camera/proxy", "Xform", rotation=(180, 0, 0))
def create_bridge_components(self):
result, self.occluded_provider = omni.kit.commands.execute(
"RobotEngineBridgeCreateCamera",
path="/World/REB_Occluded_Provider",
parent=None,
rgb_output_component="output",
rgb_output_channel="encoder_color",
depth_output_component="output",
depth_output_channel="encoder_depth",
segmentation_output_component="output",
segmentation_output_channel="encoder_segmentation",
bbox2d_output_component="output",
bbox2d_output_channel="encoder_bbox",
bbox2d_class_list="",
bbox3d_output_component="output",
bbox3d_output_channel="encoder_bbox3d",
bbox3d_class_list="",
rgb_enabled=True,
depth_enabled=False,
segmentaion_enabled=True,
bbox2d_enabled=False,
bbox3d_enabled=False,
camera_prim_rel=[self._camera.GetPath()],
resolution=self.Gf.Vec2i(1280, 720),
)
result, self.unoccluded_provider = omni.kit.commands.execute(
"RobotEngineBridgeCreateCamera",
path="/World/REB_Unoccluded_Provider",
parent=None,
rgb_output_component="output",
rgb_output_channel="decoder_color",
depth_output_component="output",
depth_output_channel="decoder_depth",
segmentation_output_component="output",
segmentation_output_channel="decoder_segmentation",
bbox2d_output_component="output",
bbox2d_output_channel="decoder_bbox",
bbox2d_class_list="",
bbox3d_output_component="output",
bbox3d_output_channel="decoder_bbox3d",
bbox3d_class_list="",
rgb_enabled=True,
depth_enabled=False,
segmentaion_enabled=True,
bbox2d_enabled=False,
bbox3d_enabled=False,
camera_prim_rel=[self._camera.GetPath()],
resolution=self.Gf.Vec2i(1280, 720),
)
# turn both cameras off so that we don't send an image when time is stepped
self.occluded_provider.GetEnabledAttr().Set(False)
self.unoccluded_provider.GetEnabledAttr().Set(False)
# create rigid body sink to publish ground truth pose information
result, self.rbs_provider = omni.kit.commands.execute(
"RobotEngineBridgeCreateRigidBodySink",
path="/World/REB_RigidBodiesSink",
parent=None,
enabled=False,
output_component="output",
output_channel="bodies",
rigid_body_prims_rel=[self._camera_proxy.GetPath(), self._target_prim.GetPath()],
)
# disable rigid body sink until the final image is sent out so its only published once
self.rbs_provider.GetEnabledAttr().Set(False)
def configure_bridge(self, json_file: str = "isaacsim.app.json"):
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_id = ext_manager.get_enabled_extension_id("omni.isaac.robot_engine_bridge")
reb_extension_path = ext_manager.get_extension_path(ext_id)
app_file = f"{reb_extension_path}/resources/isaac_engine/json/{json_file}"
carb.log_info(f"create application with: {reb_extension_path} {app_file}")
return omni.kit.commands.execute(
"RobotEngineBridgeCreateApplication", asset_path=reb_extension_path, app_file=app_file
)
def configure_randomization(self):
texture_list = [
self._asset_path + "/Samples/DR/Materials/Textures/checkered.png",
self._asset_path + "/Samples/DR/Materials/Textures/marble_tile.png",
self._asset_path + "/Samples/DR/Materials/Textures/picture_a.png",
self._asset_path + "/Samples/DR/Materials/Textures/picture_b.png",
self._asset_path + "/Samples/DR/Materials/Textures/textured_wall.png",
self._asset_path + "/Samples/DR/Materials/Textures/checkered_color.png",
]
base_path = str(self._room.GetPath())
self.texture_comp = self.dr_helper.create_texture_comp([base_path], False, texture_list)
# self.color_comp = self.dr_helper.create_color_comp([base_path+"/floor"])
# disable automatic DR, we run it ourselves in the step function
# add a movement and rotation component
# the movement component is offset by 100cm in z so that the object remains above the table
self.movement_comp = self.dr_helper.create_movement_comp(
[str(self._target_prim.GetPath())], min_range=(-10, -10, -10 + 100), max_range=(10, 10, 10 + 100)
)
self.rotation_comp = self.dr_helper.create_rotation_comp([str(self._target_prim.GetPath())])
self.dr_helper.toggle_manual_mode()
def randomize_camera(self):
# randomize camera position
self._viewport.get_viewport_window().set_camera_position(
str(self._camera.GetPath()),
random.randrange(-250, 250),
random.randrange(-250, 250),
random.randrange(10, 250),
True,
)
# get target pose and point camera at it
pose = omni.usd.get_world_transform_matrix(self._target_prim)
# can specify an offset on target position
target = pose.ExtractTranslation() + self.Gf.Vec3d(0, 0, 0)
self._viewport.get_viewport_window().set_camera_target(
str(self._camera.GetPath()), target[0], target[1], target[2], True
)
def randomize_scene(self):
self.dr_helper.randomize_once()
def toggle_environment(self, state):
imageable = self.UsdGeom.Imageable(self._environment)
if state:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def step(self):
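        """Advance the simulation one frame and publish a matched pair of views over the bridge:
        the full scene (occluded) and the same camera pose with the environment hidden
        (unoccluded), along with the ground-truth rigid body poses."""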
# randomize camera every frame
self.randomize_camera()
# randomize textures every 10 frames
if self.frame % RANDOMIZE_SCENE_EVERY_N_STEPS == 0:
self.randomize_scene()
self.toggle_environment(True)
self.kit.update(1.0 / 60.0)
# render occluded view
omni.kit.commands.execute("RobotEngineBridgeTickComponent", path=str(self.occluded_provider.GetPath()))
# hide everything but the object
self.toggle_environment(False)
self.kit.update(0)
# render unoccluded view
omni.kit.commands.execute("RobotEngineBridgeTickComponent", path=str(self.unoccluded_provider.GetPath()))
omni.kit.commands.execute("RobotEngineBridgeTickComponent", path=str(self.rbs_provider.GetPath()))
# output fps every 100 frames
if self.frame % 100 == 0:
print("FPS: ", self._viewport.get_viewport_window().get_fps())
self.frame = self.frame + 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate Occluded and Unoccluded data")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
sample = DualCameraSample()
    # Only proceed if stage creation was successful
if sample.create_stage():
sample.create_camera()
sample.configure_randomization()
# wait for stage to load
while sample.kit.is_loading():
sample.kit.update(0)
sample.create_bridge_components()
sample.configure_bridge()
sample.start()
while sample.kit.app.is_running():
sample.step()
if args.test:
break
sample.stop()
sample.shutdown()
| 11,691 | Python | 39.041096 | 113 | 0.62886 |
KazWong/omniverse_sample/ov_sample/python_samples/core/app_framework.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import sys
import carb
import omni.kit.app
import asyncio
# Simple example showing the minimal setup to run omniverse app from python
class PythonApp:
def __init__(self):
# Load app plugin
self.framework = carb.get_framework()
self.framework.load_plugins(
loaded_file_wildcards=["omni.kit.app.plugin"],
search_paths=[os.path.abspath(f'{os.environ["CARB_APP_PATH"]}/plugins')],
)
self.app = omni.kit.app.get_app()
# Path to where kit was built to
app_root = os.environ["CARB_APP_PATH"]
# Inject experience config:
sys.argv.insert(1, f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')
# Add paths to extensions
sys.argv.append(f"--ext-folder")
sys.argv.append(f'{os.path.abspath(os.environ["ISAAC_PATH"])}/exts')
# Run headless
sys.argv.append("--no-window")
# Set some settings
sys.argv.append("--/app/asyncRendering=False")
# Start the app
self.app.startup("Isaac-Sim", app_root, sys.argv)
def shutdown(self):
# Shutdown
self.app.shutdown()
self.framework.unload_all_plugins()
print("Shutdown complete")
if __name__ == "__main__":
kit = PythonApp()
# Do something, in this case we wait for stage to open and then exit
stage_task = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
while not stage_task.done():
kit.app.update()
kit.shutdown()
| 1,952 | Python | 30 | 85 | 0.655225 |
KazWong/omniverse_sample/ov_sample/python_samples/core/helper.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Simple example showing how to start and stop the helper
kit = OmniKitHelper(config=CONFIG)
### Perform any omniverse imports here after the helper loads ###
kit.play() # Start simulation
kit.update(1.0 / 60.0) # Render a single frame
kit.stop() # Stop Simulation
kit.shutdown() # Cleanup application
| 1,000 | Python | 34.749999 | 76 | 0.727 |
KazWong/omniverse_sample/ov_sample/python_samples/dofbot/online_training/dofbot_cube_detection.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Dofbot Cube Detection Demonstration
Use a PyTorch dataloader together with OmniKit to generate scenes and groundtruth to
train a Faster R-CNN detector with a [MobileNetV3](https://arxiv.org/abs/1905.02244) backbone.
"""
import torch
from torch.utils.data import DataLoader
import torchvision
import matplotlib.pyplot as plt
import numpy as np
import signal
from dofbot_dataset import RandomObjects
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def main(args):
device = "cuda"
# Setup data
train_set = RandomObjects()
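    # Detection targets are variable-length dicts, so the collate_fn simply regroups the batch
    # into a tuple of images and a tuple of targets instead of stacking them into tensors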
train_loader = DataLoader(train_set, batch_size=2, collate_fn=lambda x: tuple(zip(*x)))
def handle_exit(self, *args, **kwargs):
print("exiting cube detection dataset generation...")
train_set.exiting = True
signal.signal(signal.SIGINT, handle_exit)
from omni.isaac.synthetic_utils import visualization as vis
# Setup Model
if args.eval_model == "":
model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(
pretrained=False, num_classes=1 + len(args.categories)
)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
else:
model = torch.load(args.eval_model)
if args.visualize:
plt.ion()
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
for i, train_batch in enumerate(train_loader):
if i > args.max_iters or train_set.exiting:
print("Exiting ...")
train_set.kit.shutdown()
break
if args.eval_model == "":
model.train()
images, targets = train_batch
images = [i.to(device) for i in images]
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
if args.eval_model == "":
loss_dict = model(images, targets)
loss = sum(loss for loss in loss_dict.values())
print(f"ITER {i} | {loss:.6f}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
model.eval()
with torch.no_grad():
predictions = model(images[:1])
if args.visualize:
idx = 0
score_thresh = 0.5
pred = predictions[idx]
np_image = images[idx].permute(1, 2, 0).cpu().numpy()
for ax in axes:
if args.eval_model == "":
fig.suptitle(f"Iteration {i:05} \n {loss:.6f}", fontsize=14)
else:
fig.suptitle(f"Iteration {i:05} \n Evaluating", fontsize=14)
ax.cla()
ax.axis("off")
ax.imshow(np_image)
axes[0].set_title("Input")
axes[1].set_title("Input + Predictions")
score_filter = [i for i in range(len(pred["scores"])) if pred["scores"][i] > score_thresh]
num_instances = len(score_filter)
colours = vis.random_colours(num_instances, enable_random=False)
mapping = {i + 1: cat for i, cat in enumerate(args.categories)}
labels = [mapping[label.item()] for label in pred["labels"]]
vis.plot_boxes(ax, pred["boxes"].tolist(), labels=labels, colours=colours, label_size=10)
if not labels:
axes[1].set_title("None")
plt.draw()
plt.savefig("train.png")
# save every 100 steps
if i % 100 == 0 and args.eval_model == "":
torch.save(model, "cube_model_" + str(i) + ".pth")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument("-lr", "--learning_rate", type=float, default=1e-4, help="Learning rate")
parser.add_argument("--max-iters", type=float, default=1000, help="Number of training iterations.")
parser.add_argument("--visualize", action="store_true", help="Visualize predicted bounding boxes during training.")
parser.add_argument("--eval_model", help="model file to evaluate", default="", type=str)
args = parser.parse_args()
# Temporary
args.visualize = True
args.categories = ["None", "Cube", "Sphere", "Cone"]
main(args)
| 4,774 | Python | 34.110294 | 119 | 0.591537 |
KazWong/omniverse_sample/ov_sample/python_samples/dofbot/online_training/dofbot_dataset.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Cube Dataset with online randomized scene generation for Bounding Box Detection training.
Use OmniKit to generate a simple scene. At each iteration, the scene is populated by
creating a cube that rests on a plane. The cube pose, colours and textures are randomized.
The camera position is also randomized within a range expected for the Dofbot's POV
before capturing groundtruth consisting of an RGB rendered image, and Tight 2D Bounding Boxes
"""
from math import floor
import os
import torch
import random
import numpy as np
import signal
import omni
import carb
from omni.isaac.python_app import OmniKitHelper
# Setup default generation variables
# Value are (min, max) ranges
OBJ_TRANSLATION_X = (-60.0, 60.0)
OBJ_TRANSLATION_Z = (-60.0, 60.0)
OBJ_ROTATION_Y = (0.0, 360.0)
LIGHT_INTENSITY = (500.0, 50000.0)
# Camera POV generation variables
AZIMUTH_ROTATION = (-30.0, 30.0)
ELEVATION_ROTATION = (-70.0, -20.0)
CAM_TRANSLATION_XYZ = (-50.0, 50.0)
OBJECT_SCALE = (15, 20)
CAMERA_DISTANCE = 800
BBOX_AREA_THRESH = 16
BLANK_SCENES = (5, 8) # between 5-8%
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "PathTracing",
"samples_per_pixel_per_frame": 12,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"headless": False,
"width": 640,
"height": 480,
}
class RandomObjects(torch.utils.data.IterableDataset):
"""Dataset of cube + distractor objects - domain randomize position/colour/texture/lighting/camera angle
The RGB image and BoundingBox are captured by moving a camera aimed at the centre of the scene
which is positioned at random but at a fixed distance from the centre.
"""
def __init__(
self, categories=["None", "Cube", "Sphere", "Cone"], num_assets_min=1, num_assets_max=3, split=0.7, train=True
):
assert len(categories) > 1
assert (split > 0) and (split <= 1.0)
self.kit = OmniKitHelper(config=RENDER_CONFIG)
self.stage = self.kit.get_stage()
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.synthetic_utils import DomainRandomization
self.sd_helper = SyntheticDataHelper()
self.dr_helper = DomainRandomization()
self.dr_helper.toggle_manual_mode()
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error(
"Could not find nucleus server with /Isaac folder. Please specify the correct nucleus server in apps/omni.isaac.sim.python.kit"
)
return
result, nucleus_server = find_nucleus_server("/Library/Props/Road_Tiles/Parts/")
if result is False:
carb.log_error(
"Could not find nucleus server with /Library/Props/Road_Tiles/Parts/ folder. Please refer to the documentation to aquire the road tile assets"
)
return
self.categories = categories
self.range_num_assets = (num_assets_min, num_assets_max)
self.asset_path = nucleus_server + "/Isaac"
self._setup_world()
self.cur_idx = 0
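        # Determines how often a blank scene is produced: uniform(5, 8) gives an interval of
        # roughly every 12-20 scenes, i.e. about 5-8% of generated scenes contain no objects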
self.empty_idx = floor(100 / random.uniform(*BLANK_SCENES))
self.exiting = False
signal.signal(signal.SIGINT, self._handle_exit)
# turn this on to fix the PathTracing + Play (needed for overlap test) producing line artifacts
carb.settings.get_settings().set_bool("/rtx/resetPtAccumOnAnimTimeChange", True)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
def _setup_world(self):
from pxr import Sdf, UsdGeom, Gf, UsdPhysics, PhysxSchema
# Create physics scene for collision testing
scene = UsdPhysics.Scene.Define(self.stage, Sdf.Path("/World/physicsScene"))
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(self.stage.GetPrimAtPath("/World/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(self.stage, "/World/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
"""Setup lights, walls, floor, ceiling and camera"""
# Setup Room
self.kit.create_prim(
"/World/Room", "Sphere", attributes={"radius": 1e3, "primvars:displayColor": [(1.0, 1.0, 1.0)]}
)
# Setup ground plane
ground_scale = max(OBJECT_SCALE)
ground_prim = self.stage.DefinePrim("/World/Ground", "Cylinder")
UsdGeom.XformCommonAPI(ground_prim).SetScale((ground_scale, ground_scale, ground_scale))
UsdGeom.XformCommonAPI(ground_prim).SetTranslate((0.0, ground_scale * -0.5, 0.0))
UsdGeom.XformCommonAPI(ground_prim).SetRotate((90.0, 0.0, 0.0))
attributes = {"height": 1, "radius": 1e4, "primvars:displayColor": [(1.0, 1.0, 1.0)]}
for k, v in attributes.items():
ground_prim.GetAttribute(k).Set(v)
# Setup lights
self.kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(-450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (0.0, 0.365, 0.848)},
)
self.kit.create_prim(
"/World/Light2",
"SphereLight",
translation=(450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (1.0, 0.278, 0.0)},
)
self.kit.create_prim("/World/Asset", "Xform")
# Setup camera
self.camera_rig = UsdGeom.Xformable(self.kit.create_prim("/World/CameraRig", "Xform"))
self.camera = self.kit.create_prim("/World/CameraRig/Camera", "Camera", translation=(0.0, 0.0, CAMERA_DISTANCE))
# Change azimuth angle
self.camera_rig.AddRotateYOp().Set(0)
# Change elevation angle
self.camera_rig.AddRotateXOp().Set(-40)
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera.GetPath()))
self.viewport = omni.kit.viewport.get_default_viewport_window()
self.create_dr_comp()
self.kit.update()
def load_single_asset(self, prim_type, scale, i):
from omni.physx.scripts import utils
from pxr import Semantics, UsdGeom
overlapping = True
attempts = 0
max_attempts = 5 # after 5 placement attempts, move on
stage = self.kit.get_stage()
# Randomly generate transforms until a valid position is found
# (i.e. new object will not overlap with existing ones)
# print("attempting to spawn object...", end=" ")
while overlapping and attempts < max_attempts:
x = random.uniform(*OBJ_TRANSLATION_X)
y = scale # assumes bounding box of standard prim is 1 cubic unit
z = random.uniform(*OBJ_TRANSLATION_Z)
rot_y = random.uniform(*OBJ_ROTATION_Y)
# Validate this proposed transform
rot = carb.Float4(0.0, 0.0, 1.0, 0.0)
origin = carb.Float3(float(x), float(y), float(z))
extent = carb.Float3(float(scale), float(scale), float(scale))
overlapping = self.check_overlap(extent, origin, rot)
attempts += 1
if overlapping:
return None
# print("object spawned!")
# No overlap, define the prim and apply the transform
prim = stage.DefinePrim(f"/World/Asset/obj{i}", prim_type)
bound = UsdGeom.Mesh(prim).ComputeWorldBound(0.0, "default")
box_min_y = bound.GetBox().GetMin()[1] * scale
UsdGeom.XformCommonAPI(prim).SetScale((scale, scale, scale))
UsdGeom.XformCommonAPI(prim).SetTranslate((x, -box_min_y, z))
UsdGeom.XformCommonAPI(prim).SetRotate((0, rot_y, 0))
# Add semantic label based on prim type
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(prim_type)
# Add physics to the prim
utils.setCollider(prim, approximationShape="convexHull")
return prim
# OVERLAP --------------------------------------------
def report_hit(self, hit):
""" Existing object turns red if the proposed position would result in a collision
Note: use for troubleshooting, material randomization must be disabled for this to work
"""
from pxr import UsdGeom, Gf, Vt
hitColor = Vt.Vec3fArray([Gf.Vec3f(180.0 / 255.0, 16.0 / 255.0, 0.0)])
usdGeom = UsdGeom.Mesh.Get(self.stage, hit.rigid_body)
usdGeom.GetDisplayColorAttr().Set(hitColor)
return True
def check_overlap(self, extent, origin, rot):
from omni.physx import get_physx_scene_query_interface
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rot, self.report_hit, False)
return numHits > 0
# POPULATE AND RANDOMIZE -------------------------------
def create_dr_comp(self):
"""Creates DR components with various attributes.
The asset prims to randomize is an empty list for most components
since we get a new list of assets every iteration.
The asset list will be updated for each component in update_dr_comp()
"""
texture_list = [
self.asset_path + "/Samples/DR/Materials/Textures/checkered.png",
self.asset_path + "/Samples/DR/Materials/Textures/marble_tile.png",
self.asset_path + "/Samples/DR/Materials/Textures/picture_a.png",
self.asset_path + "/Samples/DR/Materials/Textures/picture_b.png",
self.asset_path + "/Samples/DR/Materials/Textures/textured_wall.png",
self.asset_path + "/Samples/DR/Materials/Textures/checkered_color.png",
]
material_list = [
self.asset_path + "/Samples/DR/Materials/checkered.mdl",
self.asset_path + "/Samples/DR/Materials/checkered_color.mdl",
self.asset_path + "/Samples/DR/Materials/marble_tile.mdl",
self.asset_path + "/Samples/DR/Materials/picture_a.mdl",
self.asset_path + "/Samples/DR/Materials/picture_b.mdl",
self.asset_path + "/Samples/DR/Materials/textured_wall.mdl",
]
light_list = ["World/Light1", "World/Light2"]
self.texture_comp = self.dr_helper.create_texture_comp([], True, texture_list)
self.color_comp = self.dr_helper.create_color_comp([])
self.material_comp = self.dr_helper.create_material_comp([], material_list)
self.movement_comp = self.dr_helper.create_movement_comp([])
self.rotation_comp = self.dr_helper.create_rotation_comp([])
self.scale_comp = self.dr_helper.create_scale_comp([], max_range=(50, 50, 50))
self.light_comp = self.dr_helper.create_light_comp(light_list)
self.visibility_comp = self.dr_helper.create_visibility_comp([])
def update_dr_comp(self, dr_comp):
"""Updates DR component with the asset prim paths that will be randomized"""
comp_prim_paths_target = dr_comp.GetPrimPathsRel()
comp_prim_paths_target.ClearTargets(True)
# Add targets for all objects in scene (cube + distractors)
for asset in self.assets:
comp_prim_paths_target.AddTarget(asset.GetPrimPath())
# Can also add target for ground plane
# comp_prim_paths_target.AddTarget("/World/Ground")
    def populate_scene(self):
        """Clear the scene and populate it with assets."""
        from omni.physx.scripts import utils
self.stage.RemovePrim("/World/Asset")
self.assets = []
# Start simulation so we can check overlaps before spawning
self.kit.play()
# After every (n = self.empty_idx) scenes, generate a blank scene
if (self.cur_idx % self.empty_idx) != 0:
# Add random number of objects
num_objects = random.randint(*self.range_num_assets)
for i in range(num_objects):
prim_type = random.choice(self.categories)
prim_scale = random.uniform(*OBJECT_SCALE)
new_asset = self.load_single_asset(prim_type, prim_scale, i)
# Make sure valid object was returned before appending
if new_asset:
self.assets.append(new_asset)
self.kit.update()
else:
print("Blank scene -------------------------------------------------------------")
self.stage.RemovePrim("/World/Asset")
self.assets = []
# Pick a new value for (n = self.empty_idx)
self.empty_idx = floor(100 / random.uniform(*BLANK_SCENES))
def randomize_camera(self):
"""Randomize the camera position."""
# Clear previous transforms
self.camera_rig.ClearXformOpOrder()
# Change azimuth angle
self.camera_rig.AddRotateYOp().Set(random.uniform(*AZIMUTH_ROTATION))
# Change elevation angle
self.camera_rig.AddRotateXOp().Set(random.uniform(*ELEVATION_ROTATION))
# Move camera position (translate)
translation_xyz = tuple(random.uniform(*CAM_TRANSLATION_XYZ) for _ in range(3))
self.camera_rig.AddTranslateOp().Set(translation_xyz)
def randomize_lighting(self):
self.stage.RemovePrim("/World/Light1")
intens = random.uniform(*LIGHT_INTENSITY)
self.kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(-450, 350, 350),
attributes={"radius": 100, "intensity": intens, "color": (0.0, 0.365, 0.848)},
)
self.kit.update()
# ITERATION----------------------------------------------
def __iter__(self):
return self
def __next__(self):
print("next!------------------------------")
# Generate a new scene
self.populate_scene()
self.randomize_camera()
self.update_dr_comp(self.texture_comp)
self.dr_helper.randomize_once()
self.randomize_lighting()
# Step once and then wait for materials to load
self.kit.update()
print("waiting for materials to load...")
while self.kit.is_loading():
self.kit.update()
print("done")
self.kit.update()
# Collect Groundtruth
gt = self.sd_helper.get_groundtruth(["rgb", "boundingBox2DTight"], self.viewport)
# RGB
# Drop alpha channel
image = gt["rgb"][..., :3]
# Cast to tensor if numpy array
if isinstance(gt["rgb"], np.ndarray):
image = torch.tensor(image, dtype=torch.float, device="cuda")
# Normalize between 0. and 1. and change order to channel-first.
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# Bounding Box
gt_bbox = gt["boundingBox2DTight"]
# Create mapping from categories to index
        # Use a local list so self.categories (used by populate_scene to pick prim types) is not overwritten
        categories = ["None", "Cube", "Sphere", "Cone"]
        mapping = {cat: i + 1 for i, cat in enumerate(categories)}
bboxes = torch.tensor(gt_bbox[["x_min", "y_min", "x_max", "y_max"]].tolist())
labels = torch.LongTensor([mapping[bb["semanticLabel"]] for bb in gt_bbox])
# If no objects present in view
if bboxes.nelement() == 0:
print("No object present in view")
target = {
"boxes": torch.zeros((0, 4), dtype=torch.float32),
"labels": torch.tensor([1], dtype=torch.int64),
"image_id": torch.LongTensor([self.cur_idx]),
"area": torch.tensor(0, dtype=torch.float32),
"iscrowd": torch.zeros((0,), dtype=torch.int64),
}
else:
            # Calculate the area of each bounding box
areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
# Identify invalid bounding boxes to filter final output
valid_areas = (areas > 0.0) * (areas < (image.shape[1] * image.shape[2]))
target = {
"boxes": bboxes[valid_areas],
"labels": labels[valid_areas],
"image_id": torch.LongTensor([self.cur_idx]),
"area": areas[valid_areas],
"iscrowd": torch.BoolTensor([False] * len(bboxes[valid_areas])), # Assume no crowds
}
self.cur_idx += 1
return image, target
if __name__ == "__main__":
"Typical usage"
import argparse
import matplotlib.pyplot as plt
dataset = RandomObjects()
from omni.isaac.synthetic_utils import visualization as vis
# Iterate through dataset and visualize the output
plt.ion()
_, axes = plt.subplots(1, 2, figsize=(10, 5))
plt.tight_layout()
count = 0
for image, target in dataset:
for ax in axes:
ax.clear()
ax.axis("off")
np_image = image.permute(1, 2, 0).cpu().numpy()
axes[0].imshow(np_image)
num_instances = len(target["boxes"])
colours = vis.random_colours(num_instances, enable_random=False)
        categories = ["None", "Cube", "Sphere", "Cone"]
mapping = {i + 1: cat for i, cat in enumerate(categories)}
labels = [mapping[label.item()] for label in target["labels"]]
vis.plot_boxes(ax, target["boxes"].tolist(), labels=labels, colours=colours)
plt.draw()
plt.savefig("dataset.png")
if dataset.exiting:
break
# cleanup
dataset.kit.stop()
dataset.kit.shutdown()
| 18,433 | Python | 40.147321 | 158 | 0.616015 |
KazWong/omniverse_sample/ov_sample/ros_samples/joint_control/ros_publisher.py | #!/usr/bin/env python
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rospy
from sensor_msgs.msg import JointState
import numpy as np
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", required=True, help="position: for joint position control. velocity for joint velocity control"
)
args = parser.parse_args()
rospy.init_node("test_rosbridge", anonymous=True)
pub = rospy.Publisher("/joint_command", JointState, queue_size=10)
joint_state = JointState()
joint_state.name = [
"panda_joint1",
"panda_joint2",
"panda_joint3",
"panda_joint4",
"panda_joint5",
"panda_joint6",
"panda_joint7",
"panda_finger_joint1",
"panda_finger_joint2",
]
num_joints = len(joint_state.name)
# make sure kit's editor is playing for receiving messages ##
if args.mode == "position":
joint_state.position = np.array([0.0] * num_joints)
default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4]
    # limiting the movements to a smaller range (this is not the range of the robot, just the range of the movement)
max_joints = np.array(default_joints) + 0.5
min_joints = np.array(default_joints) - 0.5
# position control the robot to wiggle around each joint
time_start = time.time()
rate = rospy.Rate(20)
while not rospy.is_shutdown():
joint_state.position = np.sin(time.time() - time_start) * (max_joints - min_joints) * 0.5 + default_joints
pub.publish(joint_state)
rate.sleep()
elif args.mode == "velocity":
rate = rospy.Rate(0.5)
joint_state.position = []
joint_state.velocity = np.array([-0.7] * num_joints)
while not rospy.is_shutdown():
pub.publish(joint_state)
rate.sleep()
joint_state.velocity = -joint_state.velocity
else:
print("control mode error")
| 2,235 | Python | 28.421052 | 115 | 0.689485 |
KazWong/omniverse_sample/ov_sample/ros_samples/isaac_moveit/scripts/panda_finger_joint2_publisher.py | #!/usr/bin/env python
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rospy
from sensor_msgs.msg import JointState
finger_joint2 = JointState()
finger_joint2.name = ["panda_finger_joint2"]
def joint_states_callback(message):
for i, name in enumerate(message.name):
if name == "panda_finger_joint1":
pos = message.position[i]
finger_joint2.position = [pos]
pub.publish(finger_joint2)
return
if __name__ == "__main__":
rospy.init_node("panda_finger_joint2_publisher")
pub = rospy.Publisher("/joint_command", JointState, queue_size=1)
rospy.Subscriber("/joint_command", JointState, joint_states_callback, queue_size=1)
rospy.spin()
| 1,116 | Python | 32.848484 | 87 | 0.72043 |
KazWong/omniverse_sample/ov_sample/ros_samples/navigation/carter_2dnav/map/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-10.325, -12.225, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
KazWong/omniverse_sample/ov_sample/ros_samples/navigation/carter_2dnav/params/base_local_planner_params.yaml | TrajectoryPlannerROS:
holonomic_robot: false
max_vel_x: 0.5
min_vel_x: 0.1
max_vel_theta: 0.8
min_vel_theta: -0.8
min_in_place_vel_theta: 0.4
acc_lim_theta: 0.2
acc_lim_x: 0.5
acc_lim_y: 0.0
xy_goal_tolerance: 0.15
yaw_goal_tolerance: 0.05
occdist_scale: 1.0
escape_vel: -0.1
| 303 | YAML | 19.266665 | 29 | 0.646865 |
KazWong/omniverse_sample/ov_sample/ros_samples/navigation/carter_2dnav/params/costmap_common_params.yaml | obstacle_range: 100
raytrace_range: 3
robot_radius: 0.5
inflation_radius: 0.15
observation_sources: laser_scan_sensor
laser_scan_sensor: {sensor_frame: carter_lidar, data_type: LaserScan, topic: /scan, marking: true, clearing: true}
| 233 | YAML | 32.428567 | 114 | 0.772532 |
KazWong/omniverse_sample/ov_sample/ros_samples/navigation/carter_2dnav/params/global_costmap_params.yaml | global_costmap:
global_frame: map
robot_base_frame: base_link
update_frequency: 1.0
publish_frequency: 0.5
static_map: true
transform_tolerance: 0.5
| 161 | YAML | 19.249998 | 29 | 0.726708 |
KazWong/omniverse_sample/ov_sample/ros_samples/navigation/carter_2dnav/params/local_costmap_params.yaml | local_costmap:
global_frame: odom
robot_base_frame: base_link
update_frequency: 5.0
publish_frequency: 2.0
static_map: false
rolling_window: true
width: 7.0
height: 7.0
resolution: 0.1
transform_tolerance: 0.5
| 230 | YAML | 18.249998 | 29 | 0.704348 |
KazWong/omniverse_sample/ov_sample/ros_samples/teleport/ros_pose_client.py | #!/usr/bin/env python
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from __future__ import print_function
from isaac_ros_messages.srv import IsaacPose, IsaacPoseRequest
from geometry_msgs.msg import Pose, Twist, Vector3
import rospy
import numpy as np
def send_pose_cube_client(new_pose):
rospy.wait_for_service("/teleport_pos")
try:
send_pose = rospy.ServiceProxy("/teleport_pos", IsaacPose)
send_pose(new_pose)
except rospy.ServiceException as e:
print("Service call failed: %s" % e)
def compose_pose(pos_vec, quat_vec):
obj_pose = Pose()
obj_pose.position.x = pos_vec[0]
obj_pose.position.y = pos_vec[1]
obj_pose.position.z = pos_vec[2]
obj_pose.orientation.w = quat_vec[0]
obj_pose.orientation.x = quat_vec[1]
obj_pose.orientation.y = quat_vec[2]
obj_pose.orientation.z = quat_vec[3]
return obj_pose
def compose_twist(lx, ly, lz, ax, ay, az):
obj_twist = Twist()
obj_twist.linear.x = lx
obj_twist.linear.y = ly
obj_twist.linear.z = lz
obj_twist.angular.x = ax
obj_twist.angular.y = ay
obj_twist.angular.z = az
return obj_twist
def compose_vec3(x, y, z):
obj_scale = Vector3()
obj_scale.x = x
obj_scale.y = y
obj_scale.z = z
return obj_scale
if __name__ == "__main__":
rospy.init_node("test_ros_teleport", anonymous=True)
new_isaac_pose_cube = IsaacPoseRequest()
new_isaac_pose_cube.names = ["/Cube"]
cube_pos_vec = np.array([0.0, 0.0, 0.0])
quat_vec = np.array([1, 0.0, 0.0, 0.0])
rate = rospy.Rate(1) # hz
while not rospy.is_shutdown():
# new random pose
cube_pos_vec = np.random.rand(3) * 0.1
cube_pose = compose_pose(cube_pos_vec, quat_vec)
new_isaac_pose_cube.poses = [cube_pose]
# publish
send_pose_cube_client(new_isaac_pose_cube)
rate.sleep()
| 2,264 | Python | 28.415584 | 76 | 0.661219 |
KazWong/omniverse_sample/script_window/read_robot_joint.py | from omni.isaac.dynamic_control import _dynamic_control
dc = _dynamic_control.acquire_dynamic_control_interface()
art = dc.get_articulation("/World/wheelbarrow")
dof_states = dc.get_articulation_dof_states(art, _dynamic_control.STATE_ALL)
#print(dof_states)
back_left = dc.find_articulation_dof(art, "wheel_left_joint")
back_right = dc.find_articulation_dof(art, "wheel_right_joint")
back_left_state = dc.get_dof_state(back_left)
back_right_state = dc.get_dof_state(back_right)
#print(back_left_state.pos)
#print(back_right_state.pos)
agv_base_link = dc.find_articulation_body(art, "agv_base_link")
base_footprint = dc.find_articulation_body(art, "base_footprint")
wheel_center_link = dc.find_articulation_body(art, "wheel_center_link")
agv_base_link_state = dc.get_rigid_body_angular_velocity(agv_base_link)
base_footprint_state = dc.get_rigid_body_angular_velocity(base_footprint)
wheel_center_link_state = dc.get_rigid_body_angular_velocity(wheel_center_link)
print(agv_base_link_state)
print(base_footprint_state)
print(wheel_center_link_state)
| 1,057 | Python | 35.482757 | 79 | 0.768212 |
KazWong/omniverse_sample/script_window/move_robot_joint.py | from omni.isaac.dynamic_control import _dynamic_control
dc = _dynamic_control.acquire_dynamic_control_interface()
art = dc.get_articulation("/World/soap_odom/odom/robot")
front_left = dc.find_articulation_dof(art, "wheel_front_left_joint")
front_right = dc.find_articulation_dof(art, "wheel_front_right_joint")
back_left = dc.find_articulation_dof(art, "wheel_back_left_joint")
back_right = dc.find_articulation_dof(art, "wheel_back_right_joint")
dc.wake_up_articulation(art)
dc.set_dof_velocity_target(front_left, -3.14)
dc.set_dof_velocity_target(front_right, 3.14)
dc.set_dof_velocity_target(back_left, -3.14)
dc.set_dof_velocity_target(back_right, 3.14)
| 662 | Python | 37.999998 | 70 | 0.758308 |
KazWong/omniverse_sample/script_window/check_collision.py | from omni.physx.scripts import utils
stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube")
cube_prim = stage.GetPrimAtPath("/World/Cube")
UsdGeom.XformCommonAPI(cube_prim).SetTranslate((0,0,100.0))
utils.setRigidBody(cube_prim, "convexHull", False)
import carb
import omni.physx
from omni.physx import get_physx_scene_query_interface
counter = 0
# Defines a cubic region to check overlap with
extent = carb.Float3(200.0, 200.0, 200.0)
origin = carb.Float3(0.0, 0.0, 0.0)
rotation = carb.Float4(0.0, 0.0, 1.0, 0.0)
def report_hit(hit):
return True
while True:
# physX query to detect number of hits for a cubic region
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rotation, report_hit, False)
print(numHits)
# physX query to detect number of hits for a spherical region
# numHits = get_physx_scene_query_interface().overlap_sphere(radius, origin, self.report_hit, False)
if numHits > 0:
print("collide")
break
| 1,040 | Python | 30.545454 | 104 | 0.723077 |
KazWong/omniverse_sample/script_window/random_cube.py | from omni.physx.scripts import utils
stage = omni.usd.get_context().get_stage()
prim = stage.DefinePrim(f"/World/cube1", "Cube")
UsdGeom.XformCommonAPI(prim).SetTranslate([500.0, 2.0, 60.0])
UsdGeom.XformCommonAPI(prim).SetScale((50.0, 50.0, 50.0))
prim_path = stage.GetPrimAtPath(f"/World/cube1")
utils.setRigidBody(prim_path, "convexHull", False)
| 352 | Python | 31.090906 | 61 | 0.738636 |
KazWong/omniverse_sample/script_window/spawn_and_move.py | import carb
import omni
import omni.kit.app
import time
from pxr import UsdGeom, Gf, Sdf
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
#from omni.isaac.dynamic_control import _dynamic_control
from omni.physx.scripts import utils
from omni.isaac.dynamic_control import _dynamic_control
result, nucleus = find_nucleus_server()
stage = omni.usd.get_context().get_stage()
prefix = "/World/soap_odom"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
robot_prim = stage.DefinePrim(prim_path, "Xform")
robot_prim.GetReferences().AddReference(nucleus + "/Library/Robots/Soap_0/soap_odom.usd")
omni.timeline.get_timeline_interface().play()
print("play")
dc = _dynamic_control.acquire_dynamic_control_interface()
art = dc.get_articulation("/World/soap_odom/odom/robot")
front_left = dc.find_articulation_dof(art, "wheel_front_left_joint")
front_right = dc.find_articulation_dof(art, "wheel_front_right_joint")
back_left = dc.find_articulation_dof(art, "wheel_back_left_joint")
back_right = dc.find_articulation_dof(art, "wheel_back_right_joint")
dc.wake_up_articulation(art)
app = omni.kit.app.get_app()
while not app.is_running():
time.sleep(1.0)
print("running")
dc.set_dof_velocity_target(front_left, -3.14)
dc.set_dof_velocity_target(front_right, 3.14)
dc.set_dof_velocity_target(back_left, -3.14)
dc.set_dof_velocity_target(back_right, 3.14)
while not app.is_running():
app.update()
#omni.timeline.get_timeline_interface().stop()
| 1,504 | Python | 27.396226 | 89 | 0.753324 |
KazWong/omniverse_sample/script_window/teleport_cube_on_run.py | from omni.isaac.dynamic_control import _dynamic_control
dc = _dynamic_control.acquire_dynamic_control_interface()
cube = dc.get_rigid_body(f"/World/cube1")
dc.wake_up_rigid_body(cube)
tf = _dynamic_control.Transform( (250.0, 250.0, 500.0), (0.0, 0.0, 0.0, 1.0))
dc.set_rigid_body_pose(cube, tf)
| 298 | Python | 28.899997 | 77 | 0.711409 |
KazWong/omniverse_sample/load_env_robot/test_env.py | import numpy as np
class Env_config:
def __init__(self, omni, kit):
self.usd_path = "omniverse://localhost/Library/Robots/config_robot/robot_event_cam.usd"
self.kit = kit
self.omni = omni
def create_objects(self, cube_num, cylinder_num, sphere_num):
from pxr import UsdGeom, Gf, PhysxSchema, UsdPhysics, Sdf, PhysicsSchemaTools
from omni.physx.scripts import utils
TRANSLATION_RANGE = 500.0
object_list = []
# create cube
stage = self.kit.get_stage()
for num in range(cube_num):
# create first cube
result, path = self.omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube")
if num == 0:
object_list.append("/World/Cube")
continue
if num < 10:
object_list.append("/World/Cube_0"+str(num))
else:
object_list.append("/World/Cube_"+str(num))
# create cylinder
for num in range(cylinder_num):
# create first cylinder
result, path = self.omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cylinder")
if num == 0:
object_list.append("/World/Cylinder")
continue
if num < 10:
object_list.append("/World/Cylinder_0"+str(num))
else:
object_list.append("/World/Cylinder_"+str(num))
# create sphere
for num in range(sphere_num):
# create first sphere
result, path = self.omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")
if num == 0:
object_list.append("/World/Sphere")
continue
if num < 10:
object_list.append("/World/Sphere_0"+str(num))
else:
object_list.append("/World/Sphere_"+str(num))
for mesh in object_list:
translation = np.random.rand(3) * TRANSLATION_RANGE
translation[2] = 40.0
cube_prim = stage.GetPrimAtPath(mesh)
UsdGeom.XformCommonAPI(cube_prim).SetTranslate(translation.tolist())
#UsdGeom.XformCommonAPI(cube_prim).SetRotate((0.0, 0.0, 0.0))
#UsdGeom.XformCommonAPI(cube_prim).SetScale((30.0, 30.0, 30.0))
utils.setRigidBody(cube_prim, "convexHull", False)
utils.setCollider(cube_prim, approximationShape="convexHull")
return object_list
def domain_randomization_test(self, target_list):
import omni.isaac.dr as dr
dr_interface = dr._dr.acquire_dr_interface()
asset_path = "omniverse://localhost/Isaac"
# List of textures to randomize from
texture_list = [
asset_path + "/Samples/DR/Materials/Textures/checkered.png",
asset_path + "/Samples/DR/Materials/Textures/marble_tile.png",
asset_path + "/Samples/DR/Materials/Textures/picture_a.png",
asset_path + "/Samples/DR/Materials/Textures/picture_b.png",
asset_path + "/Samples/DR/Materials/Textures/textured_wall.png",
asset_path + "/Samples/DR/Materials/Textures/checkered_color.png",
]
# domain randomization on position
result, prim = self.omni.kit.commands.execute(
'CreateMovementComponentCommand',
path='/World/movement_component',
prim_paths=target_list,
min_range=(-600.0, -600.0, 50.0),
max_range=(600.0, 600.0, 50.0),
target_position=None,
target_paths=None,
duration=1,
include_children=False,
seed=12345)
# domain randomization on textures
#result, prim = self.omni.kit.commands.execute(
# "CreateTextureComponentCommand",
# path='/World/texture_component',
# prim_paths=target_list,
# enable_project_uvw=False,
# texture_list=texture_list,
# duration=1,
# include_children=False,
# seed=12345)
# domain randomization on scale
result, prim = self.omni.kit.commands.execute(
'CreateScaleComponentCommand',
path='/World/scale_component',
prim_paths=target_list,
min_range=(0.5, 0.5, 1),
max_range=(2.0, 2.0, 1),
uniform_scaling=False,
duration=1,
include_children=False,
seed=12345)
dr_interface.toggle_manual_mode()
| 4,631 | Python | 39.278261 | 104 | 0.556467 |
KazWong/omniverse_sample/load_env_robot/load_env_robot.py | import numpy as np
import random
import os
import sys
import signal
import argparse
from argparse import Namespace
from omni.isaac.python_app import OmniKitHelper
def Run(args):
startup_config = {
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
kit = OmniKitHelper(startup_config)
#include after kit
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
#from omni.isaac.dynamic_control import _dynamic_control
from omni.physx.scripts import utils
result, nucleus = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server, exiting")
exit()
# enable extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
ext_manager.set_extension_enabled_immediate("omni.kit.window.stage", True)
#load environment
env_path = nucleus + args.env_path
print(env_path)
omni.usd.get_context().open_stage(env_path, None)
#random 10 objects
stage = kit.get_stage()
TRANSLATION_RANGE = 1000.0
SCALE = 30.0
for i in range(10):
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = stage.DefinePrim(f"/World/cube{i}", prim_type)
translation = np.random.rand(3) * TRANSLATION_RANGE
translation[2] = 40.0
UsdGeom.XformCommonAPI(prim).SetTranslate(translation.tolist())
UsdGeom.XformCommonAPI(prim).SetScale((SCALE, SCALE, SCALE))
#prim.GetAttribute("primvars:displayColor").Set([np.random.rand(3).tolist()])
prim_path = stage.GetPrimAtPath(f"/World/cube{i}")
utils.setRigidBody(prim_path, "convexHull", False)
#load robot
translation = np.random.rand(3) * TRANSLATION_RANGE
angle = np.random.rand(1)
prefix = "/World/soap_odom"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
robot_prim = stage.DefinePrim(prim_path, "Xform")
robot_prim.GetReferences().AddReference(args.robo_path)
xform = UsdGeom.Xformable(robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(translation.tolist())
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), (angle[0])))
xform_op.Set(mat)
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/odom/robot/agv_lidar/ROS_Lidar.enabled"), value=True, prev=None)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/odom/robot/ROS_PoseTree.enabled"), value=True, prev=None)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/odom/robot/ROS_JointState.enabled"), value=True, prev=None)
omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/ROS_Clock.enabled"), value=True, prev=None)
kit.play()
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
kit.update()
kit.stop()
kit.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument("--env_path", type=str, help="Path to environment usd file", required=True)
parser.add_argument("--robo_path", type=str, help="Path to robot usd file", required=True)
args = parser.parse_args()
print("running with args: ", args)
def handle_exit(*args, **kwargs):
print("Exiting...")
quit()
signal.signal(signal.SIGINT, handle_exit)
Run(args)
| 3,768 | Python | 30.672269 | 127 | 0.718684 |
KazWong/omniverse_sample/load_env_robot/load_env_robot_edited.py | import numpy as np
import random
import os
import sys
import signal
import argparse
from argparse import Namespace
from test_env import Env_config
from test_robot import Robot_config
from omni.isaac.python_app import OmniKitHelper
def Run(args):
startup_config = {
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
kit = OmniKitHelper(startup_config)
#include after kit
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
#from omni.isaac.dynamic_control import _dynamic_control
from omni.physx.scripts import utils
result, nucleus = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server, exiting")
exit()
# enable extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
ext_manager.set_extension_enabled_immediate("omni.kit.window.stage", True)
#load environment
env_path = nucleus + args.env_path
print(env_path)
omni.usd.get_context().open_stage(env_path, None)
test_env = Env_config(omni,kit)
# create objects
obj_list = test_env.create_objects(4,4,4)
import omni.isaac.dr as dr
dr_interface = dr._dr.acquire_dr_interface()
#print(obj_list)
# domain randomization
test_env.domain_randomization_test(obj_list)
# load robot
stage = kit.get_stage()
TRANSLATION_RANGE = 1000.0
translation = np.random.rand(3) * TRANSLATION_RANGE
angle = np.random.rand(1)
prefix = "/World/soap_odom"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
robot_prim = stage.DefinePrim(prim_path, "Xform")
robot_prim.GetReferences().AddReference(args.robo_path)
xform = UsdGeom.Xformable(robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(translation.tolist())
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), (angle[0])))
xform_op.Set(mat)
DRIVE_STIFFNESS = 10000.0
# Set joint drive parameters
wheel_back_left_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_back_left_joint"), "angular")
wheel_back_left_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
wheel_back_right_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_back_right_joint"), "angular")
wheel_back_right_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
wheel_front_left_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_front_left_joint"), "angular")
wheel_front_left_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
wheel_front_right_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_front_right_joint"), "angular")
wheel_front_right_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
#from omni.isaac import RosBridgeSchema
#omni.kit.commands.execute('ROSBridgeCreatePoseTree', path='/World/soap_odom/ROS_PoseTree', parent=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_PoseTree.enabled"), value=True, prev=None)
#omni.kit.commands.execute('RosBridgeCreatePrim', path='/ROS_PoseTree', parent=None, enabled=True, scehma_type=<class 'omni.isaac.RosBridgeSchema.RosPoseTree'>)
# add ros joint state
#omni.kit.commands.execute('ROSBridgeCreateJointState', path='/World/soap_odom/ROS_JointState', parent=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_JointState.enabled"), value=True, prev=None)
#omni.kit.commands.execute('RosBridgeCreatePrim', path='/World/soap_odom/ROS_JointState', parent=None, enabled=True, scehma_type=<class 'omni.isaac.RosBridgeSchema.RosJointState'>)
# add ros lidar
#omni.kit.commands.execute('ROSBridgeCreateLidar', path='/World/soap_odom/agv_lidar/ROS_Lidar', parent=None)
#omni.kit.commands.execute('RosBridgeCreatePrim', path='/World/soap_odom/agv_lidar/ROS_Lidar', parent=None, enabled=True, scehma_type=<class 'omni.isaac.RosBridgeSchema.RosLidar'>)
# add ros clock
omni.kit.commands.execute('ROSBridgeCreateClock',path='/ROS_Clock',parent=None)
omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path('/World/ROS_Clock.queueSize'), value=0, prev=10)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/agv_lidar/ROS_Lidar.enabled"), value=True, prev=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_PoseTree.enabled"), value=True, prev=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_JointState.enabled"), value=True, prev=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/ROS_Clock.enabled"), value=True, prev=None)
kit.play()
test_rob = Robot_config(stage, omni, robot_prim)
# initial robot
test_rob.teleport((0,0,30), 0)
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
if test_rob.check_overlap_box() == True:
# # reset robot to origin
print("colide!!, reset robot")
test_rob.teleport((0,0,30), 0)
dr_interface.randomize_once()
#test_rob.check_overlap_box()
kit.update()
kit.stop()
kit.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument("--env_path", type=str, help="Path to environment usd file", required=True)
parser.add_argument("--robo_path", type=str, help="Path to robot usd file", required=True)
args = parser.parse_args()
print("running with args: ", args)
def handle_exit(*args, **kwargs):
print("Exiting...")
quit()
signal.signal(signal.SIGINT, handle_exit)
Run(args)
| 6,073 | Python | 40.602739 | 184 | 0.739009 |
KazWong/omniverse_sample/load_env_robot/test_robot.py | import numpy as np
class Robot_config:
def __init__(self, stage, omni, robot_prim):
self.usd_path = "omniverse://localhost/Library/Robots/config_robot/robot_event_cam.usd"
self.omni = omni
from omni.isaac.dynamic_control import _dynamic_control
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.robot_prim = robot_prim
self.ar = None
self.stage = stage
def teleport(self, location, rotation, settle=False):
from pxr import Gf
from omni.isaac.dynamic_control import _dynamic_control
print("before teleport", self.ar)
#if self.ar is None:
print(type(self.robot_prim.GetPath().pathString), self.robot_prim.GetPath().pathString)
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
print("after teleport", self.ar)
chassis = self.dc.get_articulation_root_body(self.ar)
self.dc.wake_up_articulation(self.ar)
rot_quat = Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation).GetQuaternion()
tf = _dynamic_control.Transform(
location,
(rot_quat.GetImaginary()[0], rot_quat.GetImaginary()[1], rot_quat.GetImaginary()[2], rot_quat.GetReal()),
)
self.dc.set_rigid_body_pose(chassis, tf)
self.dc.set_rigid_body_linear_velocity(chassis, [0, 0, 0])
self.dc.set_rigid_body_angular_velocity(chassis, [0, 0, 0])
self.command((-20, 20, -20, 20))
# Settle the robot onto the ground
if settle:
frame = 0
velocity = 1
while velocity > 0.1 and frame < 120:
self.omni.usd.get_context().update(1.0 / 60.0)
lin_vel = self.dc.get_rigid_body_linear_velocity(chassis)
velocity = np.linalg.norm([lin_vel.x, lin_vel.y, lin_vel.z])
frame = frame + 1
def command(self,motor_value):
chassis = self.dc.get_articulation_root_body(self.ar)
#num_joints = self.dc.get_articulation_joint_count(self.ar)
#num_dofs = self.dc.get_articulation_dof_count(self.ar)
#num_bodies = self.dc.get_articulation_body_count(self.ar)
wheel_back_left = self.dc.find_articulation_dof(self.ar, "wheel_back_left_joint")
wheel_back_right = self.dc.find_articulation_dof(self.ar, "wheel_back_right_joint")
wheel_front_left = self.dc.find_articulation_dof(self.ar, "wheel_front_left_joint")
wheel_front_right = self.dc.find_articulation_dof(self.ar, "wheel_front_right_joint")
self.dc.wake_up_articulation(self.ar)
wheel_back_left_speed = self.wheel_speed_from_motor_value(motor_value[0])
wheel_back_right_speed = self.wheel_speed_from_motor_value(motor_value[1])
wheel_front_left_speed = self.wheel_speed_from_motor_value(motor_value[2])
wheel_front_right_speed = self.wheel_speed_from_motor_value(motor_value[3])
self.dc.set_dof_velocity_target(wheel_back_left, np.clip(wheel_back_left_speed, -10, 10))
self.dc.set_dof_velocity_target(wheel_back_right, np.clip(wheel_back_right_speed, -10, 10))
self.dc.set_dof_velocity_target(wheel_front_left, np.clip(wheel_front_left_speed, -10, 10))
self.dc.set_dof_velocity_target(wheel_front_right, np.clip(wheel_front_right_speed, -10, 10))
# idealized motor model
def wheel_speed_from_motor_value(self, motor_input):
print("speed is ",motor_input)
return motor_input
def check_overlap_box(self):
# Defines a cubic region to check overlap with
import omni.physx
from omni.physx import get_physx_scene_query_interface
import carb
#print("*"*50)
chassis = self.dc.get_articulation_root_body(self.ar)
robot_base_pose = self.dc.get_rigid_body_pose(chassis)
#print("chassis is ", chassis)
#print("pose is ", robot_base_pose)
print("pose is ", robot_base_pose.p)
#print("*"*50)
extent = carb.Float3(38.0, 26.0, 5.0)
# origin = carb.Float3(0.0, 0.0, 0.0)
origin = robot_base_pose.p
rotation = carb.Float4(0.0, 0.0, 1.0, 0.0)
# physX query to detect number of hits for a cubic region
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rotation, self.report_hit, False)
print("num of overlaps ", numHits)
# physX query to detect number of hits for a spherical region
# numHits = get_physx_scene_query_interface().overlap_sphere(radius, origin, self.report_hit, False)
#self.kit.update()
return numHits > 1
def report_hit(self, hit):
from pxr import UsdGeom, Gf, Vt
# When a collision is detected, the object colour changes to red.
# hitColor = Vt.Vec3fArray([Gf.Vec3f(180.0 / 255.0, 16.0 / 255.0, 0.0)])
# usdGeom = UsdGeom.Mesh.Get(self.stage, hit.rigid_body)
# usdGeom.GetDisplayColorAttr().Set(hitColor)
return True
| 5,081 | Python | 47.4 | 117 | 0.623696 |
KazWong/omniverse_sample/load_env_robot/README.md | # load_env_robot
This sample integrates the following functions (a usage sketch follows the list):
1. Use OmniKitHelper to run Omniverse
2. Load the environment
3. Add rigid bodies with random positions and orientations
4. Load the robot and enable the ROS bridge
5. Randomize the robot position and orientation
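A minimal launch sketch is shown below. The `./python.sh` wrapper and both USD paths are assumptions (placeholders chosen to match the sample's `--env_path`/`--robo_path` arguments); adjust them to your Nucleus setup:
```
./python.sh load_env_robot.py --env_path /Isaac/Environments/Simple_Warehouse/warehouse.usd --robo_path omniverse://localhost/Library/Robots/Soap_0/soap_odom.usd
```
Note that `--env_path` is appended to the Nucleus server root found at startup, while `--robo_path` is referenced as-is, so the robot path is typically a full `omniverse://` URL.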
| 243 | Markdown | 23.399998 | 53 | 0.798354 |
KazWong/omniverse_sample/omnihelper/check_collision.py | import numpy as np
import random
import os
import sys
import signal
import argparse
from argparse import Namespace
from omni.isaac.python_app import OmniKitHelper
def check_overlap_box():
# Defines a cubic region to check overlap with
import omni.physx
from omni.physx import get_physx_scene_query_interface
import carb
#print("*"*50)
extent = carb.Float3(50.0, 50.0, 50.0)
origin = carb.Float3(0.0, 0.0, 0.0)
rotation = carb.Float4(0.0, 0.0, 1.0, 0.0)
# physX query to detect number of hits for a cubic region
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rotation, report_hit, False)
print("num of overlaps ", numHits)
# physX query to detect number of hits for a spherical region
# numHits = get_physx_scene_query_interface().overlap_sphere(radius, origin, self.report_hit, False)
#self.kit.update()
return numHits > 1
def report_hit(hit):
#from pxr import UsdGeom, Gf, Vt
#stage = kit.get_stage()
## When a collision is detected, the object colour changes to red.
#hitColor = Vt.Vec3fArray([Gf.Vec3f(180.0 / 255.0, 16.0 / 255.0, 0.0)])
#usdGeom = UsdGeom.Mesh.Get(stage, hit.rigid_body)
#usdGeom.GetDisplayColorAttr().Set(hitColor)
return True
def Run(args):
startup_config = {
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
kit = OmniKitHelper(startup_config)
#include after kit
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.physx.scripts import utils
result, nucleus = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server, exiting")
exit()
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
ext_manager.set_extension_enabled_immediate("omni.kit.window.stage", True)
env_path = nucleus + args.env_path
print(env_path)
omni.usd.get_context().open_stage(env_path, None)
stage = kit.get_stage()
omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube")
cube_prim = stage.GetPrimAtPath("/World/Cube")
UsdGeom.XformCommonAPI(cube_prim).SetTranslate((0,0,100))
utils.setRigidBody(cube_prim, "convexHull", False)
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
kit.play()
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
if check_overlap_box() == True:
# # reset robot to origin
print("colide!!")
kit.update()
kit.stop()
kit.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument("--env_path", type=str, help="Path to environment usd file", required=True)
args = parser.parse_args()
Run(args)
| 3,098 | Python | 29.99 | 104 | 0.698515 |
Moetassem/OmniverseLegRotateExtension/README.md | # Extension Project Template
This project was automatically generated.
- "app" - It is a folder link to the location of your *Omniverse Kit* based app.
- "exts" - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions that will make the Python experience better.
Look for the "omni.gym.4LegRL" extension in the extension manager and enable it. Try applying changes to any Python files; they will hot-reload and you can observe the results immediately.
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed direct link to a git repository can be added to *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
git://github.com/Moetassem/OmniverseLegRotateExtension.git?branch=master&dir=exts
Notice `exts` is the repo subfolder with extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 1,257 | Markdown | 53.69565 | 174 | 0.778839 |
Moetassem/OmniverseLegRotateExtension/exts/omni.gym.4LegRL/config/extension.toml | [package]
# Semantic Versionning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Simple UI Extension Template"
description="The simplest python extension example. Use it as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.pipapi" = {}
# [python.pipapi]
# requirements = ["gym"] # SWIPAT filed under: http://nvbugs/3193231
# Main python module this extension provides, it will be publicly available as "import omni.hello.world".
[[python.module]]
name = "omni.gym.4LegRL"
| 908 | TOML | 27.406249 | 105 | 0.731278 |
Moetassem/OmniverseLegRotateExtension/exts/omni.gym.4LegRL/omni/gym/4LegRL/Model.py | import omni.ui as ui
class Model(ui.SimpleFloatModel):
def __init__(self, stage, prim_path: str, axis: str):
axes = ['x', 'y', 'z']
if axis not in axes:
            raise ValueError("Invalid axis. Expected one of: %s" % axes)
self.stage = stage
self.prim_path = prim_path
self.axis = axis
self.axisIndex = axes.index(axis)
self.prim = self.stage.GetPrimAtPath(self.prim_path)
self.primRots = self.prim.GetAttribute('xformOp:rotateXYZ').Get()
super().__init__(self.primRots[self.axisIndex])
def getAxisIndex(self):
return self.axisIndex
def getPrimPath(self):
return self.prim_path
# def setPrimPath(self, new_prim_path: str):
# _value_changed()
# self.prim_path = new_prim_path
def _value_changed(self) -> None:
return super()._value_changed() | 884 | Python | 31.777777 | 76 | 0.602941 |
Moetassem/OmniverseLegRotateExtension/exts/omni.gym.4LegRL/omni/gym/4LegRL/extension.py | import omni.ext
import omni.usd
import omni.ui as ui
import omni.kit.commands
from pxr import Usd, Gf, Tf, Trace
import carb
import carb.events
from .Model import *
import traceback
import asyncio
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def __init__(self):
self.context = None
self.stage = None
self.selection = None
self.xmodel = None
self.zmodel = None
# self.app = omni.kit.app.get_app()
# pass
def on_startup(self, ext_id):
print("[omni.gym.4LegRL] MyExtension startup")
self.context = omni.usd.get_context()
self.stage = self.context.get_stage()
self.selection = self.context.get_selection()
self.listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_change_event, self.stage)
self._window = ui.Window("My Window", width=300, height=300, dockPreference=ui.DockPreference.RIGHT_BOTTOM)
self.load_window()
def _on_selection_event(e: carb.events.IEvent):
if e.type==int(omni.usd.StageEventType.SELECTION_CHANGED):
self.load_window()
# print(f"Selection Changed!: %s \n" % str(self.context.get_selection().get_selected_prim_paths()))
self.selection_event_sub = (
self.context.get_stage_event_stream().create_subscription_to_pop(_on_selection_event, name="Selection")
)
def load_window(self):
if len(self.context.get_selection().get_selected_prim_paths()) > 1:
with self._window.frame:
with ui.VStack():
ui.Label("Select only one component")
elif len(self.context.get_selection().get_selected_prim_paths()) == 1:
self.primSelectedPath = self.context.get_selection().get_selected_prim_paths()[0]
self.xmodel = Model(self.stage, self.primSelectedPath, "x")
self.zmodel = Model(self.stage, self.primSelectedPath, "z")
            def _on_slider_change(model):
                # Note: the value-changed callback receives the Model whose value changed,
                # so the attribute lookups below resolve on that Model instance.
                if model.axisIndex == 0:
                    # await omni.kit.app.get_app().next_update_async()
                    omni.kit.commands.execute('ChangePropertyCommand',
                        prop_path=model.prim_path + '.xformOp:rotateXYZ',
                        value=Gf.Vec3d(model.get_value_as_float(), model.primRots[1], model.primRots[2]),
                        prev=model.primRots)
                elif model.axisIndex == 2:
                    # await omni.kit.app.get_app().next_update_async()
                    omni.kit.commands.execute('ChangePropertyCommand',
                        prop_path=model.prim_path + '.xformOp:rotateXYZ',
                        value=Gf.Vec3d(model.primRots[0], model.primRots[1], model.get_value_as_float()),
                        prev=model.primRots)
self.xmodel.add_value_changed_fn(_on_slider_change)
self.zmodel.add_value_changed_fn(_on_slider_change)
with self._window.frame:
with ui.VStack():
ui.Label(f"Prim Selected: '{(self.primSelectedPath)}'")
with ui.HStack():
ui.Spacer(height=ui.Percent(10))
ui.FloatSlider(self.xmodel, min=-30, max=30)
ui.Spacer(height=ui.Percent(20))
ui.FloatSlider(self.zmodel, min=-30, max=30)
ui.Spacer(height=ui.Percent(20))
else:
with self._window.frame:
with ui.VStack():
ui.Label("Select a Prim first")
@Trace.TraceFunction
def _on_change_event(self, notice, stage):
# await omni.kit.app.get_app().next_update_async()
self.load_window()
def on_shutdown(self):
print("[omni.gym.4LegRL] MyExtension shutdown") | 4,517 | Python | 44.636363 | 119 | 0.552801 |
Moetassem/OmniverseLegRotateExtension/exts/omni.gym.4LegRL/omni/gym/4LegRL/__init__.py | from .extension import *
from .Model import * | 45 | Python | 21.999989 | 24 | 0.755556 |
xyang2013/kit-exts-command-library/README.md | # Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions that will make the Python experience better.
Look for the "dli.example.command_library" extension in the extension manager and enable it. Try applying changes to any Python files; they will hot-reload and you can observe the results immediately.
Alternatively, you can launch your app from the console with this folder added to the extension search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable dli.example.command_library
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from *Omniverse Launcher*. A convenience script to use is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed direct link to a git repository can be added to *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice `exts` is the repo subfolder with extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 2,051 | Markdown | 37.71698 | 258 | 0.757679 |
xyang2013/kit-exts-command-library/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
xyang2013/kit-exts-command-library/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
xyang2013/kit-exts-command-library/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "dli example command_library"
description="A simple python extension example to use as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "example"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import dli.example.command_library".
[[python.module]]
name = "dli.example.command_library"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,607 | TOML | 32.499999 | 118 | 0.747355 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/dli/example/command_library/extension.py | import omni.kit.commands
import omni.usd
from typing import List
import omni.ui as ui
class ScaleIncrement(omni.kit.commands.Command):
def __init__(self, prim_paths: List[str]):
self.prim_paths = prim_paths
self.stage = omni.usd.get_context().get_stage()
def set_scale(self, undo: bool = False):
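        # Assumes each prim already has an `xformOp:scale` attribute; every scale
        # component is incremented by 1, or decremented by 1 when undoing.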
for path in self.prim_paths:
prim = self.stage.GetPrimAtPath(path)
old_scale = prim.GetAttribute('xformOp:scale').Get()
new_scale = tuple(x + 1 for x in old_scale)
if undo:
new_scale = tuple(x - 1 for x in old_scale)
prim.GetAttribute('xformOp:scale').Set(new_scale)
def do(self):
self.set_scale()
def undo(self):
self.set_scale(True)
def get_selection() -> List[str]:
"""Get the list of currently selected prims"""
return omni.usd.get_context().get_selection().get_selected_prim_paths()
class MyExtension(omni.ext.IExt):
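    """Extension that opens a small window with a button to scale up the currently selected prims."""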
def on_startup(self, ext_id):
print("[dli.example.command_library] MyExtension startup")
self._window = ui.Window("My Window", width=300, height=300)
with self._window.frame:
with ui.VStack():
ui.Label("Prim Scaler")
def on_click():
prim_paths = get_selection()
omni.kit.commands.execute('ScaleIncrement', prim_paths=prim_paths)
ui.Button("Scale Up!", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[dli.example.command_library] MyExtension shutdown")
self._window.destroy()
self._window = None
| 1,637 | Python | 29.90566 | 86 | 0.598045 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/dli/example/command_library/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/dli/example/command_library/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/dli/example/command_library/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import the commands module so the tests can execute the command registered by this extension
import omni.kit.commands
# Import the extension python module we are testing with its absolute import path, as if we were an external user (another extension)
import dli.example.command_library
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
    # Actual tests; notice they are "async" functions, so "await" can be used if needed
    async def test_scale_increment_command(self):
        # The ScaleIncrement command defined by this extension is available once the
        # extension is loaded; executing it with an empty prim list should succeed
        # without modifying the stage.
        omni.kit.commands.execute("ScaleIncrement", prim_paths=[])
    async def test_window_button(self):
        # Find the label and button created by MyExtension in "My Window"
        label = ui_test.find("My Window//Frame/**/Label[*]")
        scale_button = ui_test.find("My Window//Frame/**/Button[*].text=='Scale Up!'")
        self.assertEqual(label.widget.text, "Prim Scaler")
        # Clicking with nothing selected runs the command on an empty selection and must not raise
        await scale_button.click()
| 1,690 | Python | 34.978723 | 142 | 0.684024 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/docs/README.md | # Python Extension Example [dli.example.command_library]
This is an example of a pure Python Kit extension. It is intended to be copied and used as a template for creating new extensions.
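Once the extension is enabled, the `ScaleIncrement` command it registers can be exercised from the Kit Script Editor. A minimal sketch (the prim path is a placeholder - use paths of prims that exist in your stage and have an `xformOp:scale` attribute):

```python
import omni.kit.commands

# Increase the scale of the listed prims by 1 on each axis; the command is undoable
omni.kit.commands.execute("ScaleIncrement", prim_paths=["/World/Cube"])
```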
| 186 | Markdown | 36.399993 | 126 | 0.790323 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/docs/index.rst | dli.example.command_library
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: dli.example.command_library
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 355 | reStructuredText | 15.95238 | 44 | 0.630986 |
AndreiVoica/P10-MAP/README.md |
<a name="readme-top"></a>
<!-- PROJECT SHIELDS -->
<!--
*** I'm using markdown "reference style" links for readability.
*** Reference links are enclosed in brackets [ ] instead of parentheses ( ).
*** See the bottom of this document for the declaration of the reference variables
*** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use.
*** https://www.markdownguide.org/basic-syntax/#reference-style-links
-->
[![Contributors][contributors-shield]][contributors-url]
[![Stars][stars-shield]][stars-url]
[![CC BY 4.0][cc-by-shield]][cc-by]
<!-- PROJECT LOGO -->
<br />
<div align="center">
<!-- <a href="https://github.com/AndreiVoica/P10-MAP/">
<img src="images/logo.png" alt="Logo" width="80" height="80">
</a> -->
<h3 align="center">P10 - Material Acceleration Platforms</h3>
<p align="center">
Master Thesis in Robotics - Aalborg University
<br />
<a href="https://www.youtube.com/playlist?list=PLTbrI-WjdIEfSyzKvvQM6LQMU2solaiKI">View Demo</a>
<br />
</div>
<!-- TABLE OF CONTENTS -->
<details>
<summary>Table of Contents</summary>
<ol>
<li>
<a href="#about-the-project">About The Project</a>
<ul>
<li><a href="#built-with">Built With</a></li>
</ul>
</li>
<li>
<a href="#getting-started">Getting Started</a>
<ul>
<li><a href="#prerequisites">Prerequisites</a></li>
<li><a href="#installation">Installation</a></li>
</ul>
</li>
    <li><a href="#usage">Usage</a></li>
    <li><a href="#license">License</a></li>
    <li><a href="#contact">Contact</a></li>
</ol>
</details>
<!-- ABOUT THE PROJECT -->
## About the Project
<p align="center">
<img src="/docs/imgs/Frontpage.png" alt="Frontpage" width="700">
</p>
This project focuses on the transformation of chemistry laboratories into autonomous environments that can accelerate the discovery of new materials. The main goal is to optimize chemical processes that are typically performed by humans and can thus be slow and prone to errors.
The project utilizes robotic solutions and simulation to achieve this goal. The autonomous laboratory will be implemented on the AAU Matrix Production setup. This setup consists of five Kuka robotic manipulators, the B&R Automation Acopos 6D magnetic levitation platform, and various custom-made parts.
For development purposes, Nvidia Isaac Sim is used to create a simulated environment that replicates the physical setup. This allows for the execution of different experiments in a virtual setting. The Robot Operating System (ROS1) is used to control both the simulated Kuka manipulators and their real-world counterparts.
The simulation experiments demonstrate that the system is capable of automatically completing a chemical process. However, transferring these capabilities to the physical setup poses a significant challenge.
The project is the outcome of a Master's thesis in Robotics at Aalborg University.
<p align="right">(<a href="#readme-top">back to top</a>)</p>
### Built With
This project was developed on Ubuntu 20.04 with Isaac Sim 2022.2.1 and ROS Noetic.
<p align="left">
<a href="https://www.python.org" target="_blank"> <img src="https://raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg" alt="python" width="40" height="40"/> </a>
<a href="https://www.w3schools.com/cpp/" target="_blank"> <img src="https://raw.githubusercontent.com/devicons/devicon/master/icons/cplusplus/cplusplus-original.svg" alt="cplusplus" width="40" height="40"/> </a>
<a href="https://www.overleaf.com/"> <img src="https://images.ctfassets.net/nrgyaltdicpt/h9dpHuVys19B1sOAWvbP6/5f8d4c6d051f63e4ba450befd56f9189/ologo_square_colour_light_bg.svg" alt="overleaf_logo" width="40" height="40"> </a>
<a href="https://git-scm.com/" target="_blank"> <img src="https://www.vectorlogo.zone/logos/git-scm/git-scm-icon.svg" alt="git" width="40" height="40"/> </a>
<a href="https://www.linux.org/" target="_blank"> <img src="https://raw.githubusercontent.com/devicons/devicon/master/icons/linux/linux-original.svg" alt="linux" width="40" height="40"/> </a>
<a href="https://www.nvidia.com/en-us/omniverse/" target="_blank"> <img src="https://docs.omniverse.nvidia.com/con_connect/_images/renderer.png" alt="OmniverseIsaacSim" width="40" height="40"/> </a>
<a href="https://www.ros.org/"> <img src="https://upload.wikimedia.org/wikipedia/commons/b/bb/Ros_logo.svg" alt="ros_logo" height="36"> </a>
</p>
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- GETTING STARTED -->
## Getting Started
To get a local copy up and running follow these example steps.
### Prerequisites
[06/06/2023]
* Isaac Sim requirements:
  Isaac Sim has minimum hardware requirements; check the [Link](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/requirements.html) for more details.
| Element | Minimum Spec | Good | Ideal |
|---------|------------------------------------|----------------|-----------------------------------------------------|
| OS | Ubuntu 20.04/22.04, Windows 10/11 | Ubuntu 20.04/22.04, Windows 10/11 | Ubuntu 20.04/22.04, Windows 10/11 |
| CPU | Intel Core i7 (7th Generation), AMD Ryzen 5 | Intel Core i7 (9th Generation), AMD Ryzen 7 | Intel Core i9, X-series or higher, AMD Ryzen 9, Threadripper or higher |
| Cores | 4 | 8 | 16 |
| RAM | 32GB* | 64GB* | 64GB* |
| Storage | 50GB SSD | 500GB SSD | 1TB NVMe SSD |
| GPU | GeForce RTX 2070 | GeForce RTX 3080| RTX A6000 |
| VRAM | 8GB* | 10GB* | 48GB* |
Note: GeForce RTX 2060 6GB VRAM is also compatible.
Note: The asterisk (*) indicates that the specified amount is the minimum required, but more is recommended for better performance.
### Installation
* Isaac Sim and MAPs Extension
* ROS
* MoveIt
* KukaVarProxy
* Planar Motor Controller API
1. To install Isaac Sim, follow the instructions in the [Isaac Sim documentation](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html#).
Once Isaac Sim is installed, follow the steps in [MAPs Extension](/docs/installation/MAPs_Extension/README.md)
2. To install ROS, follow the instructions in the [ROS Noetic documentation](http://wiki.ros.org/noetic/Installation/Ubuntu)
3. To install MoveIt, follow the instructions in the [MoveIt documentation](https://moveit.ros.org/install/)
4. To install the KukaVarProxy, follow the instructions in the [KukaVarProxy documentation](https://github.com/ImtsSrl/KUKAVARPROXY)
5. To install the Planar Motor Controller PMC API, follow the instructions in the [planar motor controller API documentation](/docs/installation/planar_motor_control_API/README.md)
<!-- USAGE EXAMPLES -->
## Usage
The following image shows the communication workflow between ROS and the physical robots (blue), the simulation environment
(green), and the magnetic levitation platform (orange). The machine-readable recipe component is not implemented.
<p align="center">
<img src="/docs/imgs/Workflow.drawio_v2.png" alt="Workflow Diagram" width="400">
</p>
### How to run the simulation
1. Launch `roscore`
2. Open Isaac Sim and launch MAPs Extension. Check [MAPs Extension](/docs/installation/MAPs_Extension/README.md) for troubleshooting.
3. Press Play in Isaac Sim GUI
4. Launch `roslaunch isaac_moveit kuka_isaac_execution.launch` from a sourced workspace
5. Start the simulation by pressing the `Start` button in the extension GUI
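The ROS commands from steps 1 and 4 can be run from two separate terminals, for example (the workspace path below is an assumption - source the catkin workspace in which `isaac_moveit` is built):

```sh
# Terminal 1
roscore

# Terminal 2 (after pressing Play in Isaac Sim)
source ~/catkin_ws/devel/setup.bash   # assumed workspace path
roslaunch isaac_moveit kuka_isaac_execution.launch
```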
### How to run the Acopos 6D Digital Twin
1. Launch `roscore`
2. Open Isaac Sim and launch MAPs Extension. Check [MAPs Extension](/docs/installation/MAPs_Extension/README.md) for troubleshooting.
3. Check that the computer is on the same IP subnet as the PMC (by default, the PMC IP is 192.168.10.100)
4. In the MAPs GUI press Connect PMC
5. Press Start Real Setup
Note 1: To send random targets to each shuttle, uncomment the line `#self._world.add_physics_callback("sim_step_move_acopos", callback_fn=self.send_xbots_positions)` under `async def _on_real_control_event_async`.
Note 2: Adjust `self._number_shuttles = 4` to match the number of shuttles in the physical setup.
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- CONTRIBUTING
## Contributing
Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag "enhancement".
Don't forget to give the project a star! Thanks again!
1. Fork the Project
2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to the Branch (`git push origin feature/AmazingFeature`)
5. Open a Pull Request
<p align="right">(<a href="#readme-top">back to top</a>)</p>
-->
<!-- LICENSE -->
## License
This work is licensed under a
[Creative Commons Attribution 4.0 International License][cc-by].
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- CONTACT -->
## Contact
Daniel Moreno - [LinkedIn](https://www.linkedin.com/in/daniel-mparis/) - [email protected]
Andrei Voica - [LinkedIn](https://www.linkedin.com/in/andrei-voica-825b7a104/) - [email protected]
Project Link: [https://github.com/AndreiVoica/P10-MAP](https://github.com/AndreiVoica/P10-MAP)
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- ACKNOWLEDGMENTS -->
<!-- ## Acknowledgments
Use this space to list resources you find helpful and would like to give credit to. I've included a few of my favorites to kick things off!
* [Choose an Open Source License](https://choosealicense.com)
* [GitHub Emoji Cheat Sheet](https://www.webpagefx.com/tools/emoji-cheat-sheet)
* [Malven's Flexbox Cheatsheet](https://flexbox.malven.co/)
* [Malven's Grid Cheatsheet](https://grid.malven.co/)
* [Img Shields](https://shields.io)
* [GitHub Pages](https://pages.github.com)
* [Font Awesome](https://fontawesome.com)
* [React Icons](https://react-icons.github.io/react-icons/search)
<p align="right">(<a href="#readme-top">back to top</a>)</p>
-->
<!-- MARKDOWN LINKS & IMAGES -->
<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->
[contributors-shield]: https://img.shields.io/github/contributors/AndreiVoica/P10-MAP.svg?style=for-the-badge
[contributors-url]: https://github.com/AndreiVoica/P10-MAP/graphs/contributors
[stars-shield]: https://img.shields.io/github/stars/AndreiVoica/P10-MAP.svg?style=for-the-badge
[stars-url]: https://github.com/AndreiVoica/P10-MAP/stargazers
[issues-shield]: https://img.shields.io/github/issues/AndreiVoica/P10-MAP.svg?style=for-the-badge
[issues-url]: https://github.com/othneildrew/Best-README-Template/issues
[license-shield]: https://img.shields.io/github/license/othneildrew/Best-README-Template.svg?style=for-the-badge
[license-url]: https://github.com/othneildrew/Best-README-Template/blob/master/LICENSE.txt
[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-url]: https://linkedin.com/in/othneildrew
[product-screenshot]: images/screenshot.png
[Next.js]: https://img.shields.io/badge/next.js-000000?style=for-the-badge&logo=nextdotjs&logoColor=white
[Python-url]: https://nextjs.org/
[React.js]: https://img.shields.io/badge/React-20232A?style=for-the-badge&logo=react&logoColor=61DAFB
[React-url]: https://reactjs.org/
[Vue.js]: https://img.shields.io/badge/Vue.js-35495E?style=for-the-badge&logo=vuedotjs&logoColor=4FC08D
[Vue-url]: https://vuejs.org/
[Angular.io]: https://img.shields.io/badge/Angular-DD0031?style=for-the-badge&logo=angular&logoColor=white
[Angular-url]: https://angular.io/
[Svelte.dev]: https://img.shields.io/badge/Svelte-4A4A55?style=for-the-badge&logo=svelte&logoColor=FF3E00
[Svelte-url]: https://svelte.dev/
[Laravel.com]: https://img.shields.io/badge/Laravel-FF2D20?style=for-the-badge&logo=laravel&logoColor=white
[Laravel-url]: https://laravel.com
[Bootstrap.com]: https://img.shields.io/badge/Bootstrap-563D7C?style=for-the-badge&logo=bootstrap&logoColor=white
[Bootstrap-url]: https://getbootstrap.com
[JQuery.com]: https://img.shields.io/badge/jQuery-0769AD?style=for-the-badge&logo=jquery&logoColor=white
[JQuery-url]: https://jquery.com
[![CC BY 4.0][cc-by-image]][cc-by]
[cc-by]: http://creativecommons.org/licenses/by/4.0/
[cc-by-image]: https://i.creativecommons.org/l/by/4.0/88x31.png
[cc-by-shield]: https://img.shields.io/badge/License-CC%20BY%204.0-lightgrey.svg?style=for-the-badge
| 13,213 | Markdown | 44.723183 | 322 | 0.695603 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/package.xml | <package>
<name>kuka_moveit_configuration</name>
<version>0.3.0</version>
<description>
An automatically generated package with all the configuration and launch files for using the kuka_kr3r540 with the MoveIt! Motion Planning Framework
</description>
<author email="[email protected]">Aytac Kahveci</author>
<maintainer email="[email protected]">Aytac Kahveci</maintainer>
<license>BSD</license>
<url type="website">http://moveit.ros.org/</url>
<url type="bugtracker">https://github.com/ros-planning/moveit/issues</url>
<url type="repository">https://github.com/ros-planning/moveit</url>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>moveit_ros_move_group</run_depend>
<run_depend>moveit_kinematics</run_depend>
<run_depend>moveit_planners_ompl</run_depend>
<run_depend>moveit_ros_visualization</run_depend>
<run_depend>joint_state_publisher</run_depend>
<run_depend>robot_state_publisher</run_depend>
<run_depend>xacro</run_depend>
<!-- This package is referenced in the warehouse launch files, but does not build out of the box at the moment. Commented the dependency until this works. -->
<!-- <run_depend>warehouse_ros_mongo</run_depend> -->
<build_depend>kuka_kr3_support</build_depend>
<run_depend>kuka_kr3_support</run_depend>
</package>
| 1,326 | XML | 39.21212 | 160 | 0.739819 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/kuka_kr6r900sixx_moveit_sensor_manager.launch.xml | <launch>
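  <!-- Intentionally left empty: no sensor plugins are configured for this robot.
       sensor_manager.launch.xml includes this file via the moveit_sensor_manager argument. -->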
</launch>
| 20 | XML | 4.249999 | 9 | 0.6 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/sensor_manager.launch.xml | <launch>
<!-- This file makes it easy to include the settings for sensor managers -->
<!-- Params for the octomap monitor -->
<!-- <param name="octomap_frame" type="string" value="some frame in which the robot moves" /> -->
<param name="octomap_resolution" type="double" value="0.025" />
<param name="max_range" type="double" value="5.0" />
<!-- Load the robot specific sensor manager; this sets the moveit_sensor_manager ROS parameter -->
<arg name="moveit_sensor_manager" default="kuka_kr3r540" />
<include file="$(find kuka_moveit_configuration)/launch/$(arg moveit_sensor_manager)_moveit_sensor_manager.launch.xml" />
</launch>
| 657 | XML | 42.866664 | 123 | 0.686454 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/kuka_kr3r540_moveit_sensor_manager.launch.xml | <launch>
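  <!-- Intentionally left empty: no sensor plugins are configured for this robot.
       sensor_manager.launch.xml includes this file via the moveit_sensor_manager argument. -->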
</launch>
| 20 | XML | 4.249999 | 9 | 0.6 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/fake_moveit_controller_manager.launch.xml | <launch>
<!-- Set the param that trajectory_execution_manager needs to find the controller plugin -->
<param name="moveit_controller_manager" value="moveit_fake_controller_manager/MoveItFakeControllerManager"/>
<!-- The rest of the params are specific to this plugin -->
<rosparam file="$(find kuka_moveit_configuration)/config/fake_controllers.yaml"/>
</launch>
| 374 | XML | 36.499996 | 110 | 0.745989 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/kuka_kr6r900sixx_moveit_controller_manager.launch.xml | <launch>
<arg name="sim" default="false" />
<arg name="moveit_controller_manager" default="moveit_simple_controller_manager/MoveItSimpleControllerManager"/>
<param name="moveit_controller_manager" value="$(arg moveit_controller_manager)"/>
<!-- load controller_list -->
<rosparam file="$(find kuka_moveit_configuration)/config/controller.yaml"/>
<!-- Load standard kuka controller joint names from YAML file to parameter server -->
<include file="$(find kuka_hw_axis)/launch/trajectory.launch"> </include>
</launch>
| 566 | XML | 46.249996 | 113 | 0.685512 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/trajectory_execution2.launch.xml | <launch>
<!-- This file makes it easy to include the settings for trajectory execution -->
<!-- Flag indicating whether MoveIt! is allowed to load/unload or switch controllers -->
<arg name="moveit_manage_controllers" default="true"/>
<param name="moveit_manage_controllers" value="$(arg moveit_manage_controllers)"/>
<!-- When determining the expected duration of a trajectory, this multiplicative factor is applied to get the allowed duration of execution -->
<param name="trajectory_execution/allowed_execution_duration_scaling" value="1.2"/> <!-- default 1.2 -->
<!-- Allow more than the expected execution time before triggering a trajectory cancel (applied after scaling) -->
<param name="trajectory_execution/allowed_goal_duration_margin" value="0.5"/> <!-- default 0.5 -->
<!-- Allowed joint-value tolerance for validation that trajectory's first point matches current robot state -->
<param name="trajectory_execution/allowed_start_tolerance" value="0.01"/> <!-- default 0.01 -->
<!-- Load the robot specific controller manager; this sets the moveit_controller_manager ROS parameter -->
<arg name="moveit_controller_manager" default="kuka_kr3r540" />
<include file="$(find kuka_moveit_configuration)/launch/$(arg moveit_controller_manager)_moveit_controller_manager2.launch.xml" />
</launch>
| 1,343 | XML | 60.090906 | 145 | 0.72971 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/planning_pipeline.launch.xml | <launch>
<!-- This file makes it easy to include different planning pipelines;
It is assumed that all planning pipelines are named XXX_planning_pipeline.launch -->
<arg name="pipeline" default="ompl" />
<include file="$(find kuka_moveit_configuration)/launch/$(arg pipeline)_planning_pipeline.launch.xml" />
</launch>
| 339 | XML | 29.909088 | 106 | 0.707965 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/kuka_kr3r540_moveit_controller_manager2.launch.xml | <launch>
<arg name="sim" default="false" />
<arg name="moveit_controller_manager" default="moveit_simple_controller_manager/MoveItSimpleControllerManager"/>
<param name="moveit_controller_manager" value="$(arg moveit_controller_manager)"/>
<!-- load controller_list -->
<!--<rosparam file="$(find kuka_moveit_configuration)/config/controller.yaml"/>-->
<rosparam file="$(find kuka_moveit_configuration)/config/controller2.yaml"/>
<!-- Load standard kuka controller joint names from YAML file to parameter server -->
<include file="$(find kuka_hw_axis)/launch/trajectory2.launch"> </include>
</launch>
| 659 | XML | 53.999996 | 113 | 0.681335 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/kuka_kr3r540_moveit_controller_manager.launch.xml | <launch>
<arg name="sim" default="false" />
<arg name="moveit_controller_manager" default="moveit_simple_controller_manager/MoveItSimpleControllerManager"/>
<param name="moveit_controller_manager" value="$(arg moveit_controller_manager)"/>
<!-- load controller_list -->
<!--<rosparam file="$(find kuka_moveit_configuration)/config/controller.yaml"/>-->
<rosparam file="$(find kuka_moveit_configuration)/config/controller.yaml"/>
<!-- Load standard kuka controller joint names from YAML file to parameter server -->
<include file="$(find kuka_hw_axis)/launch/trajectory.launch"> </include>
</launch>
| 657 | XML | 53.833329 | 113 | 0.680365 |