file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---|
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/pick_up_example.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import numpy as np
from controllers.pick_place import PickPlaceController
from omni.isaac.core import World
from tasks.pick_place import PickPlace
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
target_position = np.array([-0.3, 0.6, 0])
target_position[2] = 0.0515 / 2.0
my_task = PickPlace(name="denso_pick_place", target_position=target_position)
my_world.add_task(my_task)
my_world.reset()
my_denso = my_world.scene.get_object("cobotta_robot")
# initialize the controller
my_controller = PickPlaceController(name="controller", robot_articulation=my_denso, gripper=my_denso.gripper)
task_params = my_world.get_task("denso_pick_place").get_params()
articulation_controller = my_denso.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
# forward the observation values to the controller to get the actions
actions = my_controller.forward(
picking_position=observations[task_params["cube_name"]["value"]]["position"],
placing_position=observations[task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[task_params["robot_name"]["value"]]["joint_positions"],
end_effector_offset=np.array([0, 0, 0.25]),
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
if args.test is True:
break
simulation_app.close()
| 2,371 | Python | 38.533333 | 109 | 0.719106 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/follow_target_example.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import numpy as np
from controllers.rmpflow import RMPFlowController
from omni.isaac.core import World
from tasks.follow_target import FollowTarget
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
# Initialize the Follow Target task with a target location for the cube to be followed by the end effector
my_task = FollowTarget(name="denso_follow_target", target_position=np.array([0.5, 0, 0.5]))
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("denso_follow_target").get_params()
target_name = task_params["target_name"]["value"]
denso_name = task_params["robot_name"]["value"]
my_denso = my_world.scene.get_object(denso_name)
# initialize the controller
my_controller = RMPFlowController(name="target_follower_controller", robot_articulation=my_denso)
# make RmpFlow aware of the ground plane
ground_plane = my_world.scene.get_object(name="default_ground_plane")
my_controller.add_obstacle(ground_plane)
articulation_controller = my_denso.get_articulation_controller()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
target_end_effector_position=observations[target_name]["position"],
target_end_effector_orientation=observations[target_name]["orientation"],
)
articulation_controller.apply_action(actions)
if args.test is True:
break
simulation_app.close()
| 2,270 | Python | 38.842105 | 106 | 0.748899 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/gripper_control.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
raise Exception("Could not find Isaac Sim assets folder")
asset_path = assets_root_path + "/Isaac/Robots/Denso/cobotta_pro_900.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/cobotta")
# define the gripper
gripper = ParallelGripper(
# We chose the following values while inspecting the articulation
end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link",
joint_prim_names=["finger_joint", "right_outer_knuckle_joint"],
joint_opened_positions=np.array([0, 0]),
joint_closed_positions=np.array([0.628, -0.628]),
action_deltas=np.array([-0.628, 0.628]),
)
# define the manipulator
my_denso = my_world.scene.add(
SingleManipulator(
prim_path="/World/cobotta",
name="cobotta_robot",
end_effector_prim_name="onrobot_rg6_base_link",
gripper=gripper,
)
)
# Set the default positions of the other gripper joints to opened so that
# they are out of the way of the joints we want to control when gripping an object, for instance.
joints_default_positions = np.zeros(12)
joints_default_positions[7] = 0.628
joints_default_positions[8] = 0.628
my_denso.set_joints_default_state(positions=joints_default_positions)
my_world.scene.add_default_ground_plane()
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
i += 1
gripper_positions = my_denso.gripper.get_joint_positions()
if i < 500:
# close the gripper slowly
my_denso.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] + 0.1, gripper_positions[1] - 0.1])
)
if i > 500:
# open the gripper slowly
my_denso.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] - 0.1, gripper_positions[1] + 0.1])
)
if i == 1000:
i = 0
if args.test is True:
break
simulation_app.close()
| 3,250 | Python | 37.247058 | 108 | 0.707692 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/rmpflow/robot_descriptor.yaml | # The robot description defines the generalized coordinates and how to map those
# to the underlying URDF dofs.
api_version: 1.0
# Defines the generalized coordinates. Each generalized coordinate is assumed
# to have an entry in the URDF.
# Lula will only use these joints to control the robot position.
cspace:
- joint_1
- joint_2
- joint_3
- joint_4
- joint_5
- joint_6
default_q: [
0.0,0.3,1.2,0.0,0.0,0.0
]
# Most dimensions of the cspace have a direct corresponding element
# in the URDF. This list of rules defines how unspecified coordinates
# should be extracted or how values in the URDF should be overwritten.
cspace_to_urdf_rules:
- {name: finger_joint, rule: fixed, value: 0.0}
- {name: left_inner_knuckle_joint, rule: fixed, value: 0.0}
- {name: right_inner_knuckle_joint, rule: fixed, value: 0.0}
- {name: right_outer_knuckle_joint, rule: fixed, value: 0.0}
- {name: left_inner_finger_joint, rule: fixed, value: 0.0}
- {name: right_inner_finger_joint, rule: fixed, value: 0.0}
# Lula uses collision spheres to define the robot geometry in order to avoid
# collisions with external obstacles. If no spheres are specified, Lula will
# not be able to avoid obstacles.
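# (Descriptive note, an assumption not in the original file: each sphere below is given as a
# center expressed in the local frame of the listed link/joint and a radius, both in stage units,
# i.e. meters for this robot.)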
collision_spheres:
- J1:
- "center": [0.0, 0.0, 0.1]
"radius": 0.08
- "center": [0.0, 0.0, 0.15]
"radius": 0.08
- "center": [0.0, 0.0, 0.2]
"radius": 0.08
- J2:
- "center": [0.0, 0.08, 0.0]
"radius": 0.08
- "center": [0.0, 0.174, 0.0]
"radius": 0.08
- "center": [-0.0, 0.186, 0.05]
"radius": 0.065
- "center": [0.0, 0.175, 0.1]
"radius": 0.065
- "center": [-0.0, 0.18, 0.15]
"radius": 0.065
- "center": [0.0, 0.175, 0.2]
"radius": 0.065
- "center": [0.0, 0.175, 0.25]
"radius": 0.065
- "center": [0.0, 0.175, 0.3]
"radius": 0.065
- "center": [0.0, 0.175, 0.35]
"radius": 0.065
- "center": [0.0, 0.175, 0.4]
"radius": 0.065
- "center": [0.0, 0.175, 0.45]
"radius": 0.065
- "center": [0.0, 0.175, 0.5]
"radius": 0.065
- "center": [-0.002, 0.1, 0.507]
"radius": 0.07
- J3:
- "center": [0.0, 0.025, 0.0]
"radius": 0.065
- "center": [0.0, -0.025, 0.0]
"radius": 0.065
- "center": [0.0, -0.025, 0.05]
"radius": 0.065
- "center": [0.0, -0.025, 0.1]
"radius": 0.065
- "center": [0.0, -0.025, 0.15]
"radius": 0.06
- "center": [0.0, -0.025, 0.2]
"radius": 0.06
- "center": [0.0, -0.025, 0.25]
"radius": 0.06
- "center": [0.0, -0.025, 0.3]
"radius": 0.06
- "center": [0.0, -0.025, 0.35]
"radius": 0.055
- "center": [0.0, -0.025, 0.4]
"radius": 0.055
- J5:
- "center": [0.0, 0.05, 0.0]
"radius": 0.055
- "center": [0.0, 0.1, 0.0]
"radius": 0.055
- J6:
- "center": [0.0, 0.0, -0.05]
"radius": 0.05
- "center": [0.0, 0.0, -0.1]
"radius": 0.05
- "center": [0.0, 0.0, -0.15]
"radius": 0.05
- "center": [0.0, 0.0, 0.04]
"radius": 0.035
- "center": [0.0, 0.0, 0.08]
"radius": 0.035
- "center": [0.0, 0.0, 0.12]
"radius": 0.035
- right_inner_knuckle:
- "center": [0.0, 0.0, 0.0]
"radius": 0.02
- "center": [0.0, -0.03, 0.025]
"radius": 0.02
- "center": [0.0, -0.05, 0.05]
"radius": 0.02
- right_inner_finger:
- "center": [0.0, 0.02, 0.0]
"radius": 0.015
- "center": [0.0, 0.02, 0.015]
"radius": 0.015
- "center": [0.0, 0.02, 0.03]
"radius": 0.015
- "center": [0.0, 0.025, 0.04]
"radius": 0.01
- left_inner_knuckle:
- "center": [0.0, 0.0, 0.0]
"radius": 0.02
- "center": [0.0, -0.03, 0.025]
"radius": 0.02
- "center": [0.0, -0.05, 0.05]
"radius": 0.02
- left_inner_finger:
- "center": [0.0, 0.02, 0.0]
"radius": 0.015
- "center": [0.0, 0.02, 0.015]
"radius": 0.015
- "center": [0.0, 0.02, 0.03]
"radius": 0.015
- "center": [0.0, 0.025, 0.04]
"radius": 0.01
| 4,073 | YAML | 27.690141 | 80 | 0.51289 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/tasks/pick_place.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from typing import Optional
import numpy as np
import omni.isaac.core.tasks as tasks
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
class PickPlace(tasks.PickPlace):
def __init__(
self,
name: str = "denso_pick_place",
cube_initial_position: Optional[np.ndarray] = None,
cube_initial_orientation: Optional[np.ndarray] = None,
target_position: Optional[np.ndarray] = None,
offset: Optional[np.ndarray] = None,
) -> None:
tasks.PickPlace.__init__(
self,
name=name,
cube_initial_position=cube_initial_position,
cube_initial_orientation=cube_initial_orientation,
target_position=target_position,
cube_size=np.array([0.0515, 0.0515, 0.0515]),
offset=offset,
)
return
def set_robot(self) -> SingleManipulator:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
raise Exception("Could not find Isaac Sim assets folder")
asset_path = assets_root_path + "/Isaac/Robots/Denso/cobotta_pro_900.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/cobotta")
gripper = ParallelGripper(
end_effector_prim_path="/World/cobotta/onrobot_rg6_base_link",
joint_prim_names=["finger_joint", "right_outer_knuckle_joint"],
joint_opened_positions=np.array([0, 0]),
joint_closed_positions=np.array([0.628, -0.628]),
action_deltas=np.array([-0.2, 0.2]),
)
manipulator = SingleManipulator(
prim_path="/World/cobotta",
name="cobotta_robot",
end_effector_prim_name="onrobot_rg6_base_link",
gripper=gripper,
)
joints_default_positions = np.zeros(12)
joints_default_positions[7] = 0.628
joints_default_positions[8] = 0.628
manipulator.set_joints_default_state(positions=joints_default_positions)
return manipulator
| 2,648 | Python | 39.753846 | 81 | 0.66352 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/controllers/pick_place.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.isaac.manipulators.controllers as manipulators_controllers
from omni.isaac.core.articulations import Articulation
from omni.isaac.manipulators.grippers import ParallelGripper
from .rmpflow import RMPFlowController
class PickPlaceController(manipulators_controllers.PickPlaceController):
def __init__(self, name: str, gripper: ParallelGripper, robot_articulation: Articulation, events_dt=None) -> None:
if events_dt is None:
events_dt = [0.005, 0.002, 1, 0.05, 0.0008, 0.005, 0.0008, 0.1, 0.0008, 0.008]
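# Descriptive note (an assumption, not part of the original file): events_dt holds one
# time-step value per phase of the pick-and-place state machine (10 phases in total);
# a smaller value presumably makes the corresponding phase advance more slowly, i.e.
# take more simulation steps to complete.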
manipulators_controllers.PickPlaceController.__init__(
self,
name=name,
cspace_controller=RMPFlowController(
name=name + "_cspace_controller", robot_articulation=robot_articulation
),
gripper=gripper,
events_dt=events_dt,
end_effector_initial_height=0.6,
)
return
| 1,362 | Python | 41.593749 | 118 | 0.713656 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.manipulators/cobotta_900/controllers/rmpflow.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import omni.isaac.motion_generation as mg
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.extensions import get_extension_path_from_name
class RMPFlowController(mg.MotionPolicyController):
def __init__(self, name: str, robot_articulation: Articulation, physics_dt: float = 1.0 / 60.0) -> None:
self.rmpflow = mg.lula.motion_policies.RmpFlow(
robot_description_path=os.path.join(os.path.dirname(__file__), "../rmpflow/robot_descriptor.yaml"),
rmpflow_config_path=os.path.join(os.path.dirname(__file__), "../rmpflow/denso_rmpflow_common.yaml"),
urdf_path=os.path.join(os.path.dirname(__file__), "../rmpflow/cobotta_pro_900.urdf"),
end_effector_frame_name="gripper_center",
maximum_substep_size=0.00334,
)
self.articulation_rmp = mg.ArticulationMotionPolicy(robot_articulation, self.rmpflow, physics_dt)
mg.MotionPolicyController.__init__(self, name=name, articulation_motion_policy=self.articulation_rmp)
(
self._default_position,
self._default_orientation,
) = self._articulation_motion_policy._robot_articulation.get_world_pose()
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
return
def reset(self):
mg.MotionPolicyController.reset(self)
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
| 2,038 | Python | 45.340908 | 112 | 0.705103 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/camera.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import matplotlib.pyplot as plt
import numpy as np
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.sensor import Camera
my_world = World(stage_units_in_meters=1.0)
cube_2 = my_world.scene.add(
DynamicCuboid(
prim_path="/new_cube_2",
name="cube_1",
position=np.array([5.0, 3, 1.0]),
scale=np.array([0.6, 0.5, 0.2]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
cube_3 = my_world.scene.add(
DynamicCuboid(
prim_path="/new_cube_3",
name="cube_2",
position=np.array([-5, 1, 3.0]),
scale=np.array([0.1, 0.1, 0.1]),
size=1.0,
color=np.array([0, 0, 255]),
linear_velocity=np.array([0, 0, 0.4]),
)
)
camera = Camera(
prim_path="/World/camera",
position=np.array([0.0, 0.0, 25.0]),
frequency=20,
resolution=(256, 256),
orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),
)
my_world.scene.add_default_ground_plane()
my_world.reset()
camera.initialize()
i = 0
camera.add_motion_vectors_to_frame()
while simulation_app.is_running():
my_world.step(render=True)
print(camera.get_current_frame())
if i == 100:
points_2d = camera.get_image_coords_from_world_points(
np.array([cube_3.get_world_pose()[0], cube_2.get_world_pose()[0]])
)
points_3d = camera.get_world_points_from_image_coords(points_2d, np.array([24.94, 24.9]))
print(points_2d)
print(points_3d)
imgplot = plt.imshow(camera.get_rgba()[:, :, :3])
plt.show()
print(camera.get_current_frame()["motion_vectors"])
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
i += 1
simulation_app.close()
| 2,399 | Python | 29 | 97 | 0.64777 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/imu_sensor.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.sensor import IMUSensor
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
asset_path = assets_root_path + "/Isaac/Robots/Carter/nova_carter_sensors.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/Carter")
my_carter = my_world.scene.add(
Articulation(prim_path="/World/Carter", name="my_carter", position=np.array([0, 0.0, 0.5]))
)
wheel_dof_names = ["joint_wheel_left", "joint_wheel_right"]
my_controller = DifferentialController(name="simple_control", wheel_radius=0.04295, wheel_base=0.4132)
imu_sensor = my_world.scene.add(
IMUSensor(
prim_path="/World/Carter/caster_wheel_left/imu_sensor",
name="imu",
frequency=60,
translation=np.array([0, 0, 0]),
)
)
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
wheel_dof_indices = [my_carter.get_dof_index(wheel_dof_names[i]) for i in range(len(wheel_dof_names))]
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
print(imu_sensor.get_current_frame())
actions = ArticulationAction()
if i >= 0 and i < 1000:
# forward
actions = my_controller.forward(command=[0.05, 0])
elif i >= 1000 and i < 1265:
# rotate
actions = my_controller.forward(command=[0.0, np.pi / 12])
elif i >= 1265 and i < 2000:
# forward
actions = my_controller.forward(command=[0.05, 0])
elif i == 2000:
i = 0
i += 1
joint_actions = ArticulationAction()
joint_actions.joint_velocities = np.zeros(my_carter.num_dof)
if actions.joint_velocities is not None:
for j in range(len(wheel_dof_indices)):
joint_actions.joint_velocities[wheel_dof_indices[j]] = actions.joint_velocities[j]
my_carter.apply_action(joint_actions)
simulation_app.close()
| 3,133 | Python | 35.022988 | 110 | 0.687201 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/effort_sensor.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# In this example, please drag the cube along the arm and see how the effort measurement from the effort sensor changes
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.sensor.scripts.effort_sensor import EffortSensor
from pxr import UsdPhysics
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
my_world = World(stage_units_in_meters=1.0, physics_dt=1.0 / 60, rendering_dt=1.0 / 60)
my_world.scene.add_default_ground_plane(z_position=-1)
asset_path = assets_root_path + "/Isaac/Robots/Simple/simple_articulation.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/Articulation")
arm_joint = "/Articulation/Arm/RevoluteJoint"
prim = get_prim_at_path(arm_joint)
joint = UsdPhysics.RevoluteJoint(prim)
joint.CreateAxisAttr("Y")
DynamicCuboid(
prim_path="/World/Cube",
name="cube_1",
position=np.array([1.5, 0, 0.2]),
color=np.array([255, 0, 0]),
size=0.1,
mass=1,
)
my_world.reset()
effort_sensor = EffortSensor(prim_path=arm_joint)
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
reading = effort_sensor.get_sensor_reading()
print(f"Sensor Time: {reading.time} Value: {reading.value} Validity: {reading.is_valid}")
if my_world.current_time_step_index == 0:
my_world.reset()
simulation_app.close()
| 2,253 | Python | 33.151515 | 119 | 0.743009 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/rotating_lidar_rtx.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.sensor import LidarRtx
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.robots import WheeledRobot
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
asset_path = assets_root_path + "/Isaac/Robots/Carter/nova_carter_sensors.usd"
my_carter = my_world.scene.add(
WheeledRobot(
prim_path="/World/Carter",
name="my_carter",
wheel_dof_names=["joint_wheel_left", "joint_wheel_right"],
create_robot=True,
usd_path=asset_path,
position=np.array([0, 0.0, 0.5]),
)
)
# config_file_name="Example_Rotary"
my_lidar = my_world.scene.add(
LidarRtx(prim_path="/World/Carter/chassis_link/front_hawk/right/lidar_rig/lidar", name="lidar")
)
cube_1 = my_world.scene.add(
DynamicCuboid(prim_path="/World/cube", name="cube_1", position=np.array([2, 2, 2.5]), scale=np.array([20, 0.2, 5]))
)
cube_2 = my_world.scene.add(
DynamicCuboid(
prim_path="/World/cube_2", name="cube_2", position=np.array([2, -2, 2.5]), scale=np.array([20, 0.2, 5])
)
)
my_controller = DifferentialController(name="simple_control", wheel_radius=0.04295, wheel_base=0.4132)
my_world.reset()
my_lidar.add_range_data_to_frame()
my_lidar.add_point_cloud_data_to_frame()
my_lidar.enable_visualization()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
if i >= 0 and i < 1000:
print(my_lidar.get_current_frame())
# forward
my_carter.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
elif i >= 1000 and i < 1265:
# rotate
my_carter.apply_wheel_actions(my_controller.forward(command=[0.0, np.pi / 12]))
elif i >= 1265 and i < 2000:
# forward
my_carter.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
elif i == 2000:
i = 0
i += 1
if args.test is True and i > 100:
break
my_world.stop()
simulation_app.update()
simulation_app.close()
| 3,309 | Python | 33.123711 | 119 | 0.684195 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/camera_opencv_fisheye.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True}) # Option: "renderer": "PathTracing"
import numpy as np
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.sensor import Camera
from PIL import Image, ImageDraw
# Given the OpenCV camera matrix and distortion coefficients (Fisheye, Kannala-Brandt model),
# creates a camera and a sample scene, renders an image and saves it to
# camera_opencv_fisheye.png file. The asset is also saved to camera_opencv_fisheye.usd file.
# Currently only supports square images (there is an issue in the rendering pipeline).
# To produce non-square images, the region of the image that is not used should be cropped.
width, height = 1920, 1200
camera_matrix = [[455.8, 0.0, 943.8], [0.0, 454.7, 602.3], [0.0, 0.0, 1.0]]
distortion_coefficients = [0.05, 0.01, -0.003, -0.0005]
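# (Clarifying note, not in the original file: the four values above are the k1..k4 terms of the
# Kannala-Brandt model, i.e. the distortion coefficients expected by OpenCV's fisheye functions.)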
# Camera sensor size and optical path parameters. These parameters are not part of the
# OpenCV camera model, but they are necessary to simulate the depth of field effect.
#
# To disable the depth of field effect, set the f_stop to 0.0. This is useful for debugging.
pixel_size = 3 # in microns, 3 microns is common
f_stop = 1.8 # f-number, the ratio of the lens focal length to the diameter of the entrance pupil
focus_distance = 0.6 # in meters, the distance from the camera to the object plane
diagonal_fov = 235 # in degrees, the diagonal field of view to be rendered
# Create a world, add a 1x1x1 meter cube, a ground plane, and a camera
world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
cube_1 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_1",
name="cube_1",
position=np.array([0, 0, 0.5]),
scale=np.array([1.0, 1.0, 1.0]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
cube_2 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_2",
name="cube_2",
position=np.array([2, 0, 0.5]),
scale=np.array([1.0, 1.0, 1.0]),
size=1.0,
color=np.array([0, 255, 0]),
)
)
cube_3 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_3",
name="cube_3",
position=np.array([0, 4, 1]),
scale=np.array([2.0, 2.0, 2.0]),
size=1.0,
color=np.array([0, 0, 255]),
)
)
camera = Camera(
prim_path="/World/camera",
position=np.array([0.0, 0.0, 2.0]), # 1 meter away from the side of the cube
frequency=30,
resolution=(width, height),
orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),
)
# Setup the scene and render a frame
world.reset()
camera.initialize()
# Calculate the focal length and aperture size from the camera matrix
((fx, _, cx), (_, fy, cy), (_, _, _)) = camera_matrix
horizontal_aperture = pixel_size * 1e-3 * width
vertical_aperture = pixel_size * 1e-3 * height
focal_length_x = fx * pixel_size * 1e-3
focal_length_y = fy * pixel_size * 1e-3
focal_length = (focal_length_x + focal_length_y) / 2 # in mm
# Set the camera parameters, note the unit conversion between Isaac Sim sensor and Kit
camera.set_focal_length(focal_length / 10.0)
camera.set_focus_distance(focus_distance)
camera.set_lens_aperture(f_stop * 100.0)
camera.set_horizontal_aperture(horizontal_aperture / 10.0)
camera.set_vertical_aperture(vertical_aperture / 10.0)
camera.set_clipping_range(0.05, 1.0e5)
# Set the distortion coefficients
camera.set_projection_type("fisheyePolynomial")
camera.set_kannala_brandt_properties(width, height, cx, cy, diagonal_fov, distortion_coefficients)
# Get the rendered frame and save it to a file
for i in range(100):
world.step(render=True)
camera.get_current_frame()
img = Image.fromarray(camera.get_rgba()[:, :, :3])
# Optional step, draw the 3D points to the image plane using the OpenCV fisheye model
def draw_points_opencv_fisheye(points3d):
import cv2
rvecs, tvecs = np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0])
points, jac = cv2.fisheye.projectPoints(
np.expand_dims(points3d, 1), rvecs, tvecs, np.array(camera_matrix), np.array(distortion_coefficients)
)
draw = ImageDraw.Draw(img)
for pt in points:
x, y = pt[0]
print("Drawing point at: ", x, y)
draw.ellipse((x - 4, y - 4, x + 4, y + 4), fill="yellow", outline="yellow")
# Draw a few 3D points at the image plane (camera is pointing down to the ground plane).
# OpenCV doesn't support projecting points behind the camera, so we avoid that.
draw_points_opencv_fisheye(
points3d=np.array(
[
[0.5, 0.5, 1.0],
[-0.5, 0.5, 1.0],
[0.5, -0.5, 1.0],
[-0.5, -0.5, 1.0],
[-3.0, -1.0, 0.0],
[-3.0, 1.0, 0.0],
[-0.5, -1.5, 1.0],
[0.5, -1.5, 1.0],
]
)
)
print("Saving the rendered image to: camera_opencv_fisheye.png")
img.save("camera_opencv_fisheye.png")
print("Saving the asset to camera_opencv_fisheye.usd")
world.scene.stage.Export("camera_opencv_fisheye.usd")
simulation_app.close()
| 5,598 | Python | 34.891025 | 109 | 0.670954 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/camera_ros.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Given a printout of ROS topic, containing the intrinsic and extrinsic parameters of the camera,
# creates a camera and a sample scene, renders an image and saves it to camera_ros.png file.
# The asset is also saved to camera_ros.usd file. The camera model is based on Intel RealSense D435i.
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
import math
import numpy as np
import omni.isaac.core.utils.numpy.rotations as rot_utils
import yaml
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.sensor import Camera
from PIL import Image, ImageDraw
# To create a model of a given ROS camera, print the camera_info topic with:
# rostopic echo /camera/color/camera_info
# And copy the output into the yaml_data variable below. Populate additional parameters using the sensor manual.
#
# Note: only the rational_polynomial model is supported in this example. For plumb_bob or pinhole
# models, set the distortion_model to "rational_polynomial" and pad array D with 0.0 up to 8 elements.
# The camera_info topic in the Isaac Sim ROS bridge will be in the rational_polynomial format.
#
# Note: when fx is not equal to fy (pixels are not square), the average of fx and fy is used as the focal length,
# and the intrinsic matrix is adjusted to have square pixels. This updated matrix is used for rendering and
# it is also populated into the camera_info topic in the Isaac Sim ROS bridge.
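# Illustrative sketch (an assumption about the adjustment described above; this helper is not
# part of the original example and is not used below): when fx != fy, the intrinsic matrix can be
# made "square pixel" by replacing both focal terms with their mean.
def square_pixel_intrinsics(fx, fy, cx, cy):
    f = (fx + fy) / 2.0  # single focal length shared by x and y
    return [f, 0.0, cx, 0.0, f, cy, 0.0, 0.0, 1.0]  # row-major 3x3 K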
yaml_data = """
# rostopic echo /camera/color/camera_info
header:
seq: 211
stamp:
secs: 1694379352
nsecs: 176209771
frame_id: "camera_color_optical_frame"
height: 480
width: 640
distortion_model: "rational_polynomial"
D: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
K: [612.4178466796875, 0.0, 309.72296142578125, 0.0, 612.362060546875, 245.35870361328125, 0.0, 0.0, 1.0]
R: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
P: [612.4178466796875, 0.0, 309.72296142578125, 0.0, 0.0, 612.362060546875, 245.35870361328125, 0.0, 0.0, 0.0, 1.0, 0.0]
"""
# Camera sensor size and optical path parameters. These parameters are not part of the
# OpenCV camera model, but they are necessary to simulate the depth of field effect.
#
# To disable the depth of field effect, set the f_stop to 0.0. This is useful for debugging.
pixel_size = 1.4 # Pixel size in microns, 3 microns is common
f_stop = 2.0 # F-number, the ratio of the lens focal length to the diameter of the entrance pupil
focus_distance = 0.5 # Focus distance in meters, the distance from the camera to the object plane
# Parsing the YAML data
data = yaml.safe_load(yaml_data)
print("Header Frame ID:", data["header"]["frame_id"])
width, height, K, D = data["width"], data["height"], data["K"], data["D"]
# Create a world, add a 1x1x1 meter cube, a ground plane, and a camera
world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
world.reset()
cube_1 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_1",
name="cube_1",
position=np.array([0, 0, 0.5]),
scale=np.array([1.0, 1.0, 1.0]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
camera = Camera(
prim_path="/World/camera",
position=np.array([0.0, 0.0, 3.0]), # 2 meter away from the side of the cube
frequency=30,
resolution=(width, height),
orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),
)
camera.initialize()
# Calculate the focal length and aperture size from the camera matrix
(fx, _, cx, _, fy, cy, _, _, _) = K
horizontal_aperture = pixel_size * 1e-3 * width
vertical_aperture = pixel_size * 1e-3 * height
focal_length_x = fx * pixel_size * 1e-3
focal_length_y = fy * pixel_size * 1e-3
focal_length = (focal_length_x + focal_length_y) / 2 # in mm
# Set the camera parameters, note the unit conversion between Isaac Sim sensor and Kit
camera.set_focal_length(focal_length / 10.0)
camera.set_focus_distance(focus_distance)
camera.set_lens_aperture(f_stop * 100.0)
camera.set_horizontal_aperture(horizontal_aperture / 10.0)
camera.set_vertical_aperture(vertical_aperture / 10.0)
camera.set_clipping_range(0.05, 1.0e5)
# Set the distortion coefficients; this is necessary when cx, cy are not at the center of the image
diagonal = 2 * math.sqrt(max(cx, width - cx) ** 2 + max(cy, height - cy) ** 2)
diagonal_fov = 2 * math.atan2(diagonal, fx + fy) * 180 / math.pi
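# (Clarifying note, not in the original file: since fx + fy is twice the average focal length,
# atan2(diagonal, fx + fy) equals atan((diagonal / 2) / f_avg), i.e. the standard pinhole
# half-angle formula applied to the distance from the principal point to the farthest image corner.)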
camera.set_projection_type("fisheyePolynomial")
camera.set_rational_polynomial_properties(width, height, cx, cy, diagonal_fov, D)
# Get the rendered frame and save it to a file
for i in range(100):
world.step(render=True)
camera.get_current_frame()
img = Image.fromarray(camera.get_rgba()[:, :, :3])
# Optional step, draw the 3D points to the image plane using the OpenCV rational polynomial camera model
def draw_points_opencv(points3d):
try:
# To install, run python.sh -m pip install opencv-python
import cv2
rvecs, tvecs = np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0])
points, jac = cv2.projectPoints(
np.expand_dims(points3d, 1), rvecs, tvecs, np.array(K).reshape(3, 3), np.array(D)
)
draw = ImageDraw.Draw(img)
for pt in points:
x, y = pt[0]
print("Drawing point at: ", x, y)
draw.ellipse((x - 4, y - 4, x + 4, y + 4), fill="orange", outline="orange")
except:
print("OpenCV is not installed, skipping OpenCV overlay")
print("To install OpenCV, run: python.sh -m pip install opencv-python")
# Draw the 3D points to the image plane
draw_points_opencv(points3d=np.array([[0.5, 0.5, 4.0], [-0.5, 0.5, 4.0], [0.5, -0.5, 4.0], [-0.5, -0.5, 4.0]]))
print("Saving the rendered image to: camera_ros.png")
img.save("camera_ros.png")
print("Saving the asset to camera_ros.usd")
world.scene.stage.Export("camera_ros.usd")
simulation_app.close()
| 6,319 | Python | 39.774193 | 120 | 0.700902 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/carter_multiple_robot_navigation.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
from omni.isaac.kit import SimulationApp
HOSPITAL_USD_PATH = "/Isaac/Samples/ROS2/Scenario/multiple_robot_carter_hospital_navigation.usd"
OFFICE_USD_PATH = "/Isaac/Samples/ROS2/Scenario/multiple_robot_carter_office_navigation.usd"
# Default environment: Hospital
ENV_USD_PATH = HOSPITAL_USD_PATH
if len(sys.argv) > 1:
if sys.argv[1] == "office":
# Choosing Office environment
ENV_USD_PATH = OFFICE_USD_PATH
elif sys.argv[1] != "hospital":
carb.log_warn("Environment name is invalid. Choosing default Hospital environment.")
else:
carb.log_warn("Environment name not specified. Choosing default Hospital environment.")
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS2 bridge sample demonstrating the manual loading of Multiple Robot Navigation scenario
simulation_app = SimulationApp(CONFIG)
import omni
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, prims, rotations, stage, viewports
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Sdf
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
# Locate assets root folder to load sample
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
usd_path = assets_root_path + ENV_USD_PATH
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
simulation_app.update()
simulation_app.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
simulation_app.update()
print("Loading Complete")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
simulation_app.update()
simulation_context.play()
simulation_app.update()
while simulation_app.is_running():
# runs with a realtime clock
simulation_app.update()
simulation_context.stop()
simulation_app.close()
| 2,512 | Python | 29.646341 | 99 | 0.766322 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/rtx_lidar.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
from omni.isaac.kit import SimulationApp
# Example for creating a RTX lidar sensor and publishing PointCloud2 data
simulation_app = SimulationApp({"headless": False})
import omni
import omni.kit.viewport.utility
import omni.replicator.core as rep
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import nucleus, stage
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Gf
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
simulation_app.update()
# Loading the simple_room environment
stage.add_reference_to_stage(
assets_root_path + "/Isaac/Environments/Simple_Warehouse/full_warehouse.usd", "/background"
)
simulation_app.update()
# Create the lidar sensor that generates data into "RtxSensorCpu"
# Sensor needs to be rotated 90 degrees about X so that its Z axis points up
# Possible options are Example_Rotary and Example_Solid_State
# drive sim applies 0.5,-0.5,-0.5,w(-0.5), we have to apply the reverse
_, sensor = omni.kit.commands.execute(
"IsaacSensorCreateRtxLidar",
path="/sensor",
parent=None,
config="Example_Rotary",
translation=(0, 0, 1.0),
orientation=Gf.Quatd(1.0, 0.0, 0.0, 0.0), # Gf.Quatd is w,i,j,k
)
# RTX sensors are cameras and must be assigned to their own render product
hydra_texture = rep.create.render_product(sensor.GetPath(), [1, 1], name="Isaac")
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
simulation_app.update()
# Create Point cloud publisher pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "ROS2PublishPointCloud")
writer.initialize(topicName="point_cloud", frameId="sim_lidar")
writer.attach([hydra_texture])
# Create the debug draw pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "DebugDrawPointCloud")
writer.attach([hydra_texture])
# Create LaserScan publisher pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "ROS2PublishLaserScan")
writer.initialize(topicName="laser_scan", frameId="sim_lidar")
writer.attach([hydra_texture])
simulation_app.update()
simulation_context.play()
while simulation_app.is_running():
simulation_app.update()
# cleanup and shutdown
simulation_context.stop()
simulation_app.close()
| 3,055 | Python | 32.582417 | 113 | 0.765303 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/clock.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import time
import carb
from omni.isaac.kit import SimulationApp
# Example ROS2 bridge sample showing rclpy and rosclock interaction
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
# Note that this is not the system-level rclpy, but one compiled for Omniverse
import rclpy
from rosgraph_msgs.msg import Clock
rclpy.init()
clock_topic = "sim_time"
manual_clock_topic = "manual_time"
# Creating a action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
("PublishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("PublishManualClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
],
og.Controller.Keys.CONNECT: [
# Connecting execution of OnPlaybackTick node to PublishClock to automatically publish each frame
("OnPlaybackTick.outputs:tick", "PublishClock.inputs:execIn"),
# Connecting execution of OnImpulseEvent node to PublishManualClock so it will only publish when an impulse event is triggered
("OnImpulseEvent.outputs:execOut", "PublishManualClock.inputs:execIn"),
# Connecting simulationTime data of ReadSimTime to the clock publisher nodes
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishManualClock.inputs:timeStamp"),
],
og.Controller.Keys.SET_VALUES: [
# Assigning topic names to clock publishers
("PublishClock.inputs:topicName", clock_topic),
("PublishManualClock.inputs:topicName", manual_clock_topic),
],
},
)
except Exception as e:
print(e)
simulation_app.update()
simulation_app.update()
# Define ROS2 callbacks
def sim_clock_callback(data):
print("sim time:", data.clock)
def manual_clock_callback(data):
print("manual stepped sim time:", data.clock)
# Create rclpy node
node = rclpy.create_node("isaac_sim_clock")
# create subscribers
sim_clock_sub = node.create_subscription(Clock, clock_topic, sim_clock_callback, 1)
manual_clock_sub = node.create_subscription(Clock, manual_clock_topic, manual_clock_callback, 1)
time.sleep(1.0)
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
# perform a fixed number of steps with fixed step size
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_context.render() # This updates rendering/app loop which calls the sim clock
simulation_context.step(render=False) # runs with a non-realtime clock
rclpy.spin_once(node, timeout_sec=0.0) # Spin node once
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# perform a fixed number of steps with realtime clock
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_app.update() # runs with a realtime clock
rclpy.spin_once(node, timeout_sec=0.0) # Spin node once
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# shutdown
rclpy.shutdown()
simulation_context.stop()
simulation_app.close()
| 4,822 | Python | 37.584 | 142 | 0.710908 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/carter_stereo.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser(description="Carter Stereo Example")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
# Example ROS2 bridge sample showing manual control over messages
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.nucleus import get_assets_root_path
from pxr import Sdf
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
# Locate assets root folder to load sample
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
exit()
usd_path = assets_root_path + "/Isaac/Samples/ROS2/Scenario/carter_warehouse_navigation.usd"
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
simulation_app.update()
simulation_app.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
simulation_app.update()
print("Loading Complete")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
ros_cameras_graph_path = "/World/Nova_Carter_ROS/front_hawk"
# Enabling rgb image publishers for left camera. Cameras will automatically publish images each frame
og.Controller.set(og.Controller.attribute(ros_cameras_graph_path + "/left_camera_render_product.inputs:enabled"), True)
simulation_context.play()
simulation_context.step()
# Enabling rgb image publishers for right camera after left cameras are initialized. Cameras will automatically publish images each frame
og.Controller.set(og.Controller.attribute(ros_cameras_graph_path + "/right_camera_render_product.inputs:enabled"), True)
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
simulation_context.step()
# Create a ROS publisher to publish message to spin robot in place
# If a system-level rclpy is sourced in bashrc or the terminal, it is imported; otherwise the backup rclpy libraries shipped with Isaac Sim are used
import rclpy
rclpy.init()
from geometry_msgs.msg import Twist
node = rclpy.create_node("carter_stereo")
publisher = node.create_publisher(Twist, "cmd_vel", 10)
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Publish the ROS Twist message every 2 frames
if frame % 2 == 0:
message = Twist()
message.angular.z = 0.5 # spin in place
publisher.publish(message)
if args.test and frame > 120:
break
frame = frame + 1
node.destroy_node()
rclpy.shutdown()
simulation_context.stop()
simulation_app.close()
| 3,363 | Python | 32.64 | 137 | 0.765685 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/camera_manual.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
from omni.isaac.kit import SimulationApp
CAMERA_STAGE_PATH = "/Camera"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS2 bridge sample demonstrating the manual loading of stages and manual publishing of images
simulation_app = SimulationApp(CONFIG)
import omni
import omni.graph.core as og
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom
# enable ROS2 bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(CAMERA_STAGE_PATH, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(-1, 5, 1))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
{
"graph_path": ROS_CAMERA_GRAPH_PATH,
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
("cameraHelperRgb", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperInfo", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperDepth", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "createViewport.inputs:execIn"),
("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"),
("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"),
("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"),
("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
],
keys.SET_VALUES: [
("createViewport.inputs:viewportId", 0),
("cameraHelperRgb.inputs:frameId", "sim_camera"),
("cameraHelperRgb.inputs:topicName", "rgb"),
("cameraHelperRgb.inputs:type", "rgb"),
("cameraHelperInfo.inputs:frameId", "sim_camera"),
("cameraHelperInfo.inputs:topicName", "camera_info"),
("cameraHelperInfo.inputs:type", "camera_info"),
("cameraHelperDepth.inputs:frameId", "sim_camera"),
("cameraHelperDepth.inputs:topicName", "depth"),
("cameraHelperDepth.inputs:type", "depth"),
("setCamera.inputs:cameraPrim", [usdrt.Sdf.Path(CAMERA_STAGE_PATH)]),
],
},
)
# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)
simulation_app.update()
# Use the IsaacSimulationGate step value to block execution on specific frames
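# Each image pipeline contains an IsaacSimulationGate node whose "step" input gates execution:
# 0 blocks the pipeline and a positive value lets it run, so the loop below toggles it per frame to
# control each publisher's rate. Assuming the step value acts as a frame divisor, an alternative is
# to set a fixed step once (e.g. 5 for RGB, 60 for depth) instead of toggling it every frame.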
SD_GRAPH_PATH = "/Render/PostProcess/SDGPipeline"
viewport_api = get_active_viewport()
if viewport_api is not None:
import omni.syntheticdata._syntheticdata as sd
curr_stage = omni.usd.get_context().get_stage()
# Required for editing the SDGPipeline graph which exists in the Session Layer
with Usd.EditContext(curr_stage, curr_stage.GetSessionLayer()):
# Get name of rendervar for RGB sensor type
rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
# Get path to IsaacSimulationGate node in RGB pipeline
rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_rgb + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
sd.SensorType.DistanceToImagePlane.name
)
# Get path to IsaacSimulationGate node in Depth pipeline
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_depth + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Get path to IsaacSimulationGate node in CameraInfo pipeline
camera_info_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
"PostProcessDispatch" + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Physics must be initialized before querying articulations or other physics handles
simulation_context.initialize_physics()
simulation_context.play()
frame = 0
while simulation_app.is_running() and simulation_context.is_playing():
# Run with a fixed step size
simulation_context.step(render=True)
if simulation_context.is_playing():
        # Rotate the camera by 0.25 degrees every frame
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
# Set the step value for the simulation gates to zero to stop execution
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(0)
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(0)
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(0)
# Publish the ROS rgb image message every 5 frames
if frame % 5 == 0:
# Enable rgb Branch node to start publishing rgb image
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(1)
# Publish the ROS Depth image message every 60 frames
if frame % 60 == 0:
# Enable depth Branch node to start publishing depth image
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(1)
# Publish the ROS Camera Info message every frame
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(1)
frame = frame + 1
simulation_context.stop()
simulation_app.close()
| 8,054 | Python | 42.074866 | 106 | 0.706109 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/moveit.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
import numpy as np
from omni.isaac.kit import SimulationApp
FRANKA_STAGE_PATH = "/Franka"
FRANKA_USD_PATH = "/Isaac/Robots/Franka/franka_alt_fingers.usd"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Room/simple_room.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS2 bridge sample demonstrating the manual loading of stages
# and creation of ROS components
simulation_app = SimulationApp(CONFIG)
import omni.graph.core as og
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, prims, rotations, stage, viewports
from pxr import Gf
# enable ROS2 bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Preparing stage
viewports.set_camera_view(eye=np.array([1.2, 1.2, 0.8]), target=np.array([0, 0, 0.5]))
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Loading the franka robot USD
prims.create_prim(
FRANKA_STAGE_PATH,
"Xform",
position=np.array([0, -0.64, 0]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(0, 0, 1), 90)),
usd_path=assets_root_path + FRANKA_USD_PATH,
)
simulation_app.update()
# Creating a action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("Context", "omni.isaac.ros2_bridge.ROS2Context"),
("PublishJointState", "omni.isaac.ros2_bridge.ROS2PublishJointState"),
("SubscribeJointState", "omni.isaac.ros2_bridge.ROS2SubscribeJointState"),
("ArticulationController", "omni.isaac.core_nodes.IsaacArticulationController"),
("PublishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
],
og.Controller.Keys.CONNECT: [
("OnImpulseEvent.outputs:execOut", "PublishJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "SubscribeJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "PublishClock.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "ArticulationController.inputs:execIn"),
("Context.outputs:context", "PublishJointState.inputs:context"),
("Context.outputs:context", "SubscribeJointState.inputs:context"),
("Context.outputs:context", "PublishClock.inputs:context"),
("ReadSimTime.outputs:simulationTime", "PublishJointState.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("SubscribeJointState.outputs:jointNames", "ArticulationController.inputs:jointNames"),
(
"SubscribeJointState.outputs:positionCommand",
"ArticulationController.inputs:positionCommand",
),
(
"SubscribeJointState.outputs:velocityCommand",
"ArticulationController.inputs:velocityCommand",
),
("SubscribeJointState.outputs:effortCommand", "ArticulationController.inputs:effortCommand"),
],
og.Controller.Keys.SET_VALUES: [
# Setting the /Franka target prim to Articulation Controller node
("ArticulationController.inputs:usePath", True),
("ArticulationController.inputs:robotPath", FRANKA_STAGE_PATH),
("PublishJointState.inputs:topicName", "isaac_joint_states"),
("SubscribeJointState.inputs:topicName", "isaac_joint_commands"),
("PublishJointState.inputs:targetPrim", [usdrt.Sdf.Path(FRANKA_STAGE_PATH)]),
("PublishTF.inputs:targetPrims", [usdrt.Sdf.Path(FRANKA_STAGE_PATH)]),
],
},
)
except Exception as e:
print(e)
simulation_app.update()
# Physics must be initialized before querying articulations or other physics handles
simulation_context.initialize_physics()
simulation_context.play()
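# With the graph above, joint states are published on "isaac_joint_states" and commands are consumed
# from "isaac_joint_commands"; a MoveIt 2 / ros2_control setup remapped to those topics (assumed to be
# configured separately on the ROS 2 side) can then drive the robot. For a quick check from a sourced
# ROS 2 terminal: ros2 topic echo /isaac_joint_states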
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Tick the Publish/Subscribe JointState and Publish Clock nodes each frame
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_context.stop()
simulation_app.close()
| 5,442 | Python | 41.523437 | 109 | 0.684491 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/subscriber.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
from omni.isaac.core import World
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.core.utils.extensions import enable_extension
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
import time
# Note that this is not the system level rclpy, but one compiled for omniverse
import numpy as np
import rclpy
from rclpy.node import Node
from std_msgs.msg import Empty
class Subscriber(Node):
def __init__(self):
super().__init__("tutorial_subscriber")
# setting up the world with a cube
self.timeline = omni.timeline.get_timeline_interface()
self.ros_world = World(stage_units_in_meters=1.0)
self.ros_world.scene.add_default_ground_plane()
# add a cube in the world
cube_path = "/cube"
self.ros_world.scene.add(
VisualCuboid(prim_path=cube_path, name="cube_1", position=np.array([0, 0, 10]), size=0.2)
)
self._cube_position = np.array([0, 0, 0])
# setup the ROS2 subscriber here
self.ros_sub = self.create_subscription(Empty, "move_cube", self.move_cube_callback, 10)
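        # The callback can be triggered from a sourced ROS 2 terminal (assuming matching domain
        # settings) with: ros2 topic pub --once /move_cube std_msgs/msg/Empty "{}"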
self.ros_world.reset()
def move_cube_callback(self, data):
        # Callback: choose a new random cube position upon receiving an (empty) ROS 2 message
if self.ros_world.is_playing():
self._cube_position = np.array([np.random.rand() * 0.40, np.random.rand() * 0.40, 0.10])
def run_simulation(self):
self.timeline.play()
while simulation_app.is_running():
self.ros_world.step(render=True)
rclpy.spin_once(self, timeout_sec=0.0)
if self.ros_world.is_playing():
if self.ros_world.current_time_step_index == 0:
self.ros_world.reset()
# the actual setting the cube pose is done here
self.ros_world.scene.get_object("cube_1").set_world_pose(self._cube_position)
# Cleanup
self.timeline.stop()
self.destroy_node()
simulation_app.close()
if __name__ == "__main__":
rclpy.init()
subscriber = Subscriber()
subscriber.run_simulation()
| 2,787 | Python | 33 | 103 | 0.670255 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/multiple_tasks.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.dofbot.controllers import PickPlaceController
from omni.isaac.dofbot.tasks import PickPlace
from omni.isaac.franka.controllers.stacking_controller import StackingController as FrankaStackingController
from omni.isaac.franka.tasks import Stacking as FrankaStacking
from omni.isaac.universal_robots.controllers import StackingController as UR10StackingController
from omni.isaac.universal_robots.tasks import Stacking as UR10Stacking
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController
from omni.isaac.wheeled_robots.robots import WheeledRobot
from omni.isaac.wheeled_robots.robots.holonomic_robot_usd_setup import HolonomicRobotUsdSetup
my_world = World(stage_units_in_meters=1.0)
tasks = []
num_of_tasks = 3
tasks.append(FrankaStacking(name="task_0", offset=np.array([0, -2, 0])))
my_world.add_task(tasks[-1])
tasks.append(UR10Stacking(name="task_1", offset=np.array([0.5, 0.5, 0])))
my_world.add_task(tasks[-1])
tasks.append(PickPlace(offset=np.array([0, -1, 0])))
my_world.add_task(tasks[-1])
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
kaya_asset_path = assets_root_path + "/Isaac/Robots/Kaya/kaya.usd"
my_kaya = my_world.scene.add(
WheeledRobot(
prim_path="/World/Kaya",
name="my_kaya",
wheel_dof_names=["axle_0_joint", "axle_1_joint", "axle_2_joint"],
create_robot=True,
usd_path=kaya_asset_path,
position=np.array([-1, 0, 0]),
)
)
jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd"
my_jetbot = my_world.scene.add(
WheeledRobot(
prim_path="/World/Jetbot",
name="my_jetbot",
wheel_dof_names=["left_wheel_joint", "right_wheel_joint"],
create_robot=True,
usd_path=jetbot_asset_path,
position=np.array([-1.5, -1.5, 0]),
)
)
my_world.reset()
robots = []
for i in range(num_of_tasks):
task_params = tasks[i].get_params()
robots.append(my_world.scene.get_object(task_params["robot_name"]["value"]))
controllers = []
controllers.append(
FrankaStackingController(
name="pick_place_controller",
gripper=robots[0].gripper,
robot_articulation=robots[0],
picking_order_cube_names=tasks[0].get_cube_names(),
robot_observation_name=robots[0].name,
)
)
controllers[-1].reset()
controllers.append(
UR10StackingController(
name="pick_place_controller",
gripper=robots[1].gripper,
robot_articulation=robots[1],
picking_order_cube_names=tasks[1].get_cube_names(),
robot_observation_name=robots[1].name,
)
)
controllers[-1].reset()
controllers.append(
PickPlaceController(name="pick_place_controller", gripper=robots[2].gripper, robot_articulation=robots[2])
)
kaya_setup = HolonomicRobotUsdSetup(
robot_prim_path=my_kaya.prim_path, com_prim_path="/World/Kaya/base_link/control_offset"
)
(
wheel_radius,
wheel_positions,
wheel_orientations,
mecanum_angles,
wheel_axis,
up_axis,
) = kaya_setup.get_holonomic_controller_params()
kaya_controller = HolonomicController(
name="holonomic_controller",
wheel_radius=wheel_radius,
wheel_positions=wheel_positions,
wheel_orientations=wheel_orientations,
mecanum_angles=mecanum_angles,
wheel_axis=wheel_axis,
up_axis=up_axis,
)
jetbot_controller = DifferentialController(name="simple_control", wheel_radius=0.03, wheel_base=0.1125)
pick_place_task_params = tasks[2].get_params()
articulation_controllers = []
for i in range(num_of_tasks):
articulation_controllers.append(robots[i].get_articulation_controller())
i = 0
my_world.pause()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
controllers[0].reset()
controllers[1].reset()
controllers[2].reset()
kaya_controller.reset()
jetbot_controller.reset()
observations = my_world.get_observations()
actions = controllers[0].forward(observations=observations, end_effector_offset=np.array([0, 0, 0]))
articulation_controllers[0].apply_action(actions)
actions = controllers[1].forward(observations=observations, end_effector_offset=np.array([0, 0, 0.02]))
articulation_controllers[1].apply_action(actions)
actions = controllers[2].forward(
picking_position=observations[pick_place_task_params["cube_name"]["value"]]["position"],
placing_position=observations[pick_place_task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[pick_place_task_params["robot_name"]["value"]]["joint_positions"],
end_effector_offset=np.array([0, -0.06, 0]),
)
articulation_controllers[2].apply_action(actions)
if i >= 0 and i < 500:
my_kaya.apply_wheel_actions(kaya_controller.forward(command=[0.2, 0.0, 0.0]))
my_jetbot.apply_wheel_actions(jetbot_controller.forward(command=[0.1, 0]))
elif i >= 500 and i < 1000:
# TODO: change with new USD
my_kaya.apply_wheel_actions(kaya_controller.forward(command=[0, 0.2, 0.0]))
my_jetbot.apply_wheel_actions(jetbot_controller.forward(command=[0.0, np.pi / 10]))
elif i >= 1000 and i < 1500:
# TODO: change with new USD
my_kaya.apply_wheel_actions(kaya_controller.forward(command=[0, 0.0, 0.6]))
my_jetbot.apply_wheel_actions(jetbot_controller.forward(command=[0.1, 0]))
i += 1
simulation_app.close()
| 6,457 | Python | 38.619632 | 115 | 0.696144 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/stacking.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.universal_robots.controllers import StackingController
from omni.isaac.universal_robots.tasks import Stacking
my_world = World(stage_units_in_meters=1.0)
my_task = Stacking()
my_world.add_task(my_task)
my_world.reset()
robot_name = my_task.get_params()["robot_name"]["value"]
my_ur10 = my_world.scene.get_object(robot_name)
my_controller = StackingController(
name="stacking_controller",
gripper=my_ur10.gripper,
robot_articulation=my_ur10,
picking_order_cube_names=my_task.get_cube_names(),
robot_observation_name=robot_name,
)
articulation_controller = my_ur10.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(observations=observations, end_effector_offset=np.array([0.0, 0.0, 0.02]))
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,669 | Python | 36.11111 | 114 | 0.745956 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.kaya/kaya_move.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController
from omni.isaac.wheeled_robots.robots import WheeledRobot
from omni.isaac.wheeled_robots.robots.holonomic_robot_usd_setup import HolonomicRobotUsdSetup
my_world = World(stage_units_in_meters=1.0)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
kaya_asset_path = assets_root_path + "/Isaac/Robots/Kaya/kaya.usd"
my_kaya = my_world.scene.add(
WheeledRobot(
prim_path="/World/Kaya",
name="my_kaya",
wheel_dof_names=["axle_0_joint", "axle_1_joint", "axle_2_joint"],
create_robot=True,
usd_path=kaya_asset_path,
position=np.array([0, 0.0, 0.02]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
my_world.scene.add_default_ground_plane()
kaya_setup = HolonomicRobotUsdSetup(
robot_prim_path=my_kaya.prim_path, com_prim_path="/World/Kaya/base_link/control_offset"
)
(
wheel_radius,
wheel_positions,
wheel_orientations,
mecanum_angles,
wheel_axis,
up_axis,
) = kaya_setup.get_holonomic_controller_params()
my_controller = HolonomicController(
name="holonomic_controller",
wheel_radius=wheel_radius,
wheel_positions=wheel_positions,
wheel_orientations=wheel_orientations,
mecanum_angles=mecanum_angles,
wheel_axis=wheel_axis,
up_axis=up_axis,
)
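# HolonomicController.forward() takes a command of [x velocity, y velocity, yaw velocity]
# (assumed to be in m/s and rad/s) and maps it to joint velocities for the three omni wheels,
# as used in the drive sequence below.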
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
if i >= 0 and i < 500:
my_kaya.apply_wheel_actions(my_controller.forward(command=[0.4, 0.0, 0.0]))
elif i >= 500 and i < 1000:
my_kaya.apply_wheel_actions(my_controller.forward(command=[0.0, 0.4, 0.0]))
elif i >= 1000 and i < 1200:
my_kaya.apply_wheel_actions(my_controller.forward(command=[0.0, 0.0, 0.05]))
elif i == 1200:
i = 0
i += 1
simulation_app.close()
| 2,828 | Python | 33.084337 | 93 | 0.694484 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/multiple_tasks.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.franka.controllers.pick_place_controller import PickPlaceController
from omni.isaac.franka.tasks import PickPlace
my_world = World(stage_units_in_meters=1.0)
tasks = []
num_of_tasks = 2
for i in range(num_of_tasks):
tasks.append(PickPlace(name="task" + str(i), offset=np.array([0, (i * 2) - 3, 0])))
my_world.add_task(tasks[-1])
my_world.reset()
frankas = []
cube_names = []
for i in range(num_of_tasks):
task_params = tasks[i].get_params()
frankas.append(my_world.scene.get_object(task_params["robot_name"]["value"]))
cube_names.append(task_params["cube_name"]["value"])
controllers = []
for i in range(num_of_tasks):
controllers.append(
PickPlaceController(name="pick_place_controller", gripper=frankas[i].gripper, robot_articulation=frankas[i])
)
controllers[-1].reset()
articulation_controllers = []
for i in range(num_of_tasks):
articulation_controllers.append(frankas[i].get_articulation_controller())
my_world.pause()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
for i in range(num_of_tasks):
controllers[i].reset()
observations = my_world.get_observations()
for i in range(num_of_tasks):
actions = controllers[i].forward(
picking_position=observations[cube_names[i]]["position"],
placing_position=observations[cube_names[i]]["target_position"],
current_joint_positions=observations[frankas[i].name]["joint_positions"],
end_effector_offset=np.array([0, 0, 0]),
)
articulation_controllers[i].apply_action(actions)
simulation_app.close()
| 2,447 | Python | 37.857142 | 116 | 0.691868 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/franka_gripper.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
from omni.isaac.core import World
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.franka import Franka
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
my_franka = my_world.scene.add(Franka(prim_path="/World/Franka", name="my_franka"))
my_world.scene.add_default_ground_plane()
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
i += 1
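        # The gripper reports two prismatic finger joint positions (assumed to be in meters);
        # decrementing both closes the fingers and incrementing opens them, as done below.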
gripper_positions = my_franka.gripper.get_joint_positions()
if i < 500:
my_franka.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] - (0.005), gripper_positions[1] - (0.005)])
)
if i > 500:
my_franka.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] + (0.005), gripper_positions[1] + (0.005)])
)
if i == 1000:
i = 0
if args.test is True:
break
simulation_app.close()
| 1,791 | Python | 33.461538 | 116 | 0.686209 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_command_api_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import time
import numpy as np
import omni
from omni.isaac.core.objects import DynamicCuboid, VisualCuboid
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.df import DfNetwork, DfState, DfStateMachineDecider, DfStateSequence
from omni.isaac.cortex.dfb import DfBasicContext
from omni.isaac.cortex.robot import add_franka_to_stage
class NullspaceShiftState(DfState):
def __init__(self):
super().__init__()
self.config_mean = np.array([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75])
self.target_p = np.array([0.7, 0.0, 0.5])
self.construction_time = time.time()
def enter(self):
# Change the posture configuration while maintaining a consistent target.
posture_config = self.config_mean + np.random.randn(7)
self.context.robot.arm.send_end_effector(target_position=self.target_p, posture_config=posture_config)
self.entry_time = time.time()
# Close the gripper if open and open the gripper if closed. It closes more quickly than it
# opens.
gripper = self.context.robot.gripper
if gripper.get_width() > 0.05:
gripper.close(speed=0.5)
else:
gripper.open(speed=0.1)
print("[%f] <enter> sampling posture config" % (self.entry_time - self.construction_time))
def step(self):
if time.time() - self.entry_time < 2.0:
return self
return None
def main():
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/franka"))
world.scene.add_default_ground_plane()
decider_network = DfNetwork(
DfStateMachineDecider(DfStateSequence([NullspaceShiftState()], loop=True)), context=DfBasicContext(robot)
)
world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,443 | Python | 33.422535 | 113 | 0.695456 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_cortex_sync_belief_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("example_cortex_sync_belief")
parser.add_argument(
"--behavior",
type=str,
default=None,
help="Which behavior to run. See behavior/franka for available behavior files. By default, it launches no behavior.",
)
parser.add_argument(
"--auto_sync_objects", action="store_true", help="Automatically sync the objects with their measured poses."
)
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from behaviors.franka.franka_behaviors import ContextStateMonitor, behaviors
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.cortex.cortex_object import CortexObject
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import add_franka_to_stage
enable_extension("omni.isaac.cortex_sync")
from omni.isaac.cortex_sync.cortex_ros import CortexControlRos, CortexObjectsRos, cortex_init_ros_node
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
cortex_init_ros_node("example_cortex_sync_belief")
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
cortex_objects = {}
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
cortex_objects[spec.name] = CortexObject(obj)
robot.register_obstacle(cortex_objects[spec.name])
world.scene.add_default_ground_plane()
cortex_control = CortexControlRos(robot)
cortex_objects_ros = CortexObjectsRos(cortex_objects, auto_sync_objects=args.auto_sync_objects)
decider_network = None
context_monitor = ContextStateMonitor(print_dt=0.25)
if args.behavior in behaviors:
decider_network = behaviors[args.behavior].make_decider_network(robot)
elif args.behavior is not None:
decider_network = load_behavior_module(args.behavior).make_decider_network(robot)
if decider_network:
        decider_network.context.add_monitor(context_monitor.monitor)
        world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 3,330 | Python | 34.43617 | 121 | 0.694895 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/follow_example_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core.objects import VisualSphere
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.df import DfNetwork, DfState, DfStateMachineDecider
from omni.isaac.cortex.dfb import DfBasicContext
from omni.isaac.cortex.robot import add_franka_to_stage
class FollowState(DfState):
"""The context object is available as self.context. We have access to everything in the context
object, which in this case is everything in the robot object (the command API and the follow
sphere).
"""
@property
def robot(self):
return self.context.robot
@property
def follow_sphere(self):
return self.context.robot.follow_sphere
def enter(self):
self.robot.gripper.close()
self.follow_sphere.set_world_pose(*self.robot.arm.get_fk_pq().as_tuple())
def step(self):
target_position, _ = self.follow_sphere.get_world_pose()
self.robot.arm.send_end_effector(target_position=target_position)
return self # Always transition back to this state.
def main():
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
# Add a sphere to the scene to follow, and store it off in a new member as part of the robot.
robot.follow_sphere = world.scene.add(
VisualSphere(
name="follow_sphere", prim_path="/World/FollowSphere", radius=0.02, color=np.array([0.7, 0.0, 0.7])
)
)
world.scene.add_default_ground_plane()
# Add a simple state machine decider network with the single state defined above. This state
# will be persistently stepped because it always returns itself.
world.add_decider_network(DfNetwork(DfStateMachineDecider(FollowState()), context=DfBasicContext(robot)))
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,441 | Python | 34.911764 | 111 | 0.722655 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_cortex_sync_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("example_cortex_sync")
parser.add_argument(
"--behavior",
type=str,
default="block_stacking_behavior",
help="Which behavior to run. See behavior/franka for available behavior files.",
)
parser.add_argument(
"--auto_sync_objects", action="store_true", help="Automatically sync the objects with their measured poses."
)
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from behaviors.franka.franka_behaviors import ContextStateMonitor, behaviors
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.cortex.cortex_object import CortexObject
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import add_franka_to_stage
enable_extension("omni.isaac.cortex_sync")
from omni.isaac.cortex_sync.cortex_ros import (
CortexControlRos,
CortexObjectsRos,
CortexSimObjectsRos,
CortexSimRobotRos,
cortex_init_ros_node,
)
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
cortex_init_ros_node("example_cortex_sync")
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
sim_prim = XFormPrim(prim_path="/Sim")
sim_prim.set_world_pose(position=np.array([-2.0, 0.0, 0.0]))
sim_robot = world.add_robot(
add_franka_to_stage(name="franka_sim", prim_path="/Sim/Franka", use_motion_commander=False)
)
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
cortex_objects = {}
sim_objects = {}
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
cortex_objects[spec.name] = CortexObject(obj)
robot.register_obstacle(cortex_objects[spec.name])
sim_obj = world.scene.add(
DynamicCuboid(
prim_path="/Sim/Obs/{}".format(spec.name),
name="{}_sim".format(spec.name),
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
sim_objects[spec.name] = sim_obj
world.scene.add_default_ground_plane()
cortex_sim = CortexSimRobotRos(sim_robot)
cortex_sim_objects_ros = CortexSimObjectsRos(sim_objects)
cortex_control = CortexControlRos(robot)
cortex_objects_ros = CortexObjectsRos(cortex_objects, auto_sync_objects=args.auto_sync_objects)
decider_network = None
context_monitor = ContextStateMonitor(print_dt=0.25)
if args.behavior in behaviors:
decider_network = behaviors[args.behavior].make_decider_network(robot)
elif args.behavior is not None:
decider_network = load_behavior_module(args.behavior).make_decider_network(robot)
if decider_network:
decider_network.context.add_monitor(context_monitor.monitor)
world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 4,202 | Python | 33.450819 | 112 | 0.669205 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/demo_ur10_conveyor_main.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import random
import numpy as np
import omni.isaac.cortex.math_util as math_util
import omni.isaac.cortex.sample_behaviors.ur10.bin_stacking_behavior as behavior
from omni.isaac.core.objects import VisualCapsule, VisualSphere
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.cortex.cortex_rigid_prim import CortexRigidPrim
from omni.isaac.cortex.cortex_utils import get_assets_root_path_or_die
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import CortexUr10
class Ur10Assets:
def __init__(self):
self.assets_root_path = get_assets_root_path_or_die()
self.ur10_table_usd = (
self.assets_root_path + "/Isaac/Samples/Leonardo/Stage/ur10_bin_stacking_short_suction.usd"
)
self.small_klt_usd = self.assets_root_path + "/Isaac/Props/KLT_Bin/small_KLT.usd"
self.background_usd = self.assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
self.rubiks_cube_usd = self.assets_root_path + "/Isaac/Props/Rubiks_Cube/rubiks_cube.usd"
def print_diagnostics(diagnostic):
print("=========== logical state ==========")
if diagnostic.bin_name:
print("active bin info:")
print("- bin_obj.name: {}".format(diagnostic.bin_name))
print("- bin_base: {}".format(diagnostic.bin_base))
print("- grasp_T:\n{}".format(diagnostic.grasp))
print("- is_grasp_reached: {}".format(diagnostic.grasp_reached))
print("- is_attached: {}".format(diagnostic.attached))
print("- needs_flip: {}".format(diagnostic.needs_flip))
else:
print("<no active bin>")
print("------------------------------------")
def random_bin_spawn_transform():
x = random.uniform(-0.15, 0.15)
y = 1.5
z = -0.15
position = np.array([x, y, z])
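    # Build a unit quaternion for a small random yaw; the Quaternion helper is assumed to take
    # [w, x, y, z] components, so with x = y = 0 the rotation is only about the world z axis.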
z = random.random() * 0.02 - 0.01
w = random.random() * 0.02 - 0.01
norm = np.sqrt(z**2 + w**2)
quat = math_util.Quaternion([w / norm, 0, 0, z / norm])
if random.random() > 0.5:
print("<flip>")
# flip the bin so it's upside down
quat = quat * math_util.Quaternion([0, 0, 1, 0])
else:
print("<no flip>")
return position, quat.vals
class BinStackingTask(BaseTask):
def __init__(self, env_path, assets):
super().__init__("bin_stacking")
self.assets = assets
self.env_path = "/World/Ur10Table"
self.bins = []
self.stashed_bins = []
self.on_conveyor = None
def _spawn_bin(self, rigid_bin):
x, q = random_bin_spawn_transform()
rigid_bin.set_world_pose(position=x, orientation=q)
rigid_bin.set_linear_velocity(np.array([0, -0.30, 0]))
rigid_bin.set_visibility(True)
def post_reset(self) -> None:
if len(self.bins) > 0:
for rigid_bin in self.bins:
self.scene.remove_object(rigid_bin.name)
self.bins.clear()
self.on_conveyor = None
def pre_step(self, time_step_index, simulation_time) -> None:
"""Spawn a new randomly oriented bin if the previous bin has been placed."""
spawn_new = False
if self.on_conveyor is None:
spawn_new = True
else:
(x, y, z), _ = self.on_conveyor.get_world_pose()
is_on_conveyor = y > 0.0 and -0.4 < x and x < 0.4
if not is_on_conveyor:
spawn_new = True
if spawn_new:
name = "bin_{}".format(len(self.bins))
prim_path = self.env_path + "/bins/{}".format(name)
add_reference_to_stage(usd_path=self.assets.small_klt_usd, prim_path=prim_path)
self.on_conveyor = self.scene.add(CortexRigidPrim(name=name, prim_path=prim_path))
self._spawn_bin(self.on_conveyor)
self.bins.append(self.on_conveyor)
def main():
world = CortexWorld()
env_path = "/World/Ur10Table"
ur10_assets = Ur10Assets()
add_reference_to_stage(usd_path=ur10_assets.ur10_table_usd, prim_path=env_path)
add_reference_to_stage(usd_path=ur10_assets.background_usd, prim_path="/World/Background")
background_prim = XFormPrim(
"/World/Background", position=[10.00, 2.00, -1.18180], orientation=[0.7071, 0, 0, 0.7071]
)
robot = world.add_robot(CortexUr10(name="robot", prim_path="{}/ur10".format(env_path)))
obs = world.scene.add(
VisualSphere(
"/World/Ur10Table/Obstacles/FlipStationSphere",
name="flip_station_sphere",
position=np.array([0.73, 0.76, -0.13]),
radius=0.2,
visible=False,
)
)
robot.register_obstacle(obs)
obs = world.scene.add(
VisualSphere(
"/World/Ur10Table/Obstacles/NavigationDome",
name="navigation_dome_obs",
position=[-0.031, -0.018, -1.086],
radius=1.1,
visible=False,
)
)
robot.register_obstacle(obs)
az = np.array([1.0, 0.0, -0.3])
ax = np.array([0.0, 1.0, 0.0])
ay = np.cross(az, ax)
R = math_util.pack_R(ax, ay, az)
quat = math_util.matrix_to_quat(R)
obs = world.scene.add(
VisualCapsule(
"/World/Ur10Table/Obstacles/NavigationBarrier",
name="navigation_barrier_obs",
position=[0.471, 0.276, -0.463 - 0.1],
orientation=quat,
radius=0.5,
height=0.9,
visible=False,
)
)
robot.register_obstacle(obs)
obs = world.scene.add(
VisualCapsule(
"/World/Ur10Table/Obstacles/NavigationFlipStation",
name="navigation_flip_station_obs",
position=np.array([0.766, 0.755, -0.5]),
radius=0.5,
height=0.5,
visible=False,
)
)
robot.register_obstacle(obs)
world.add_task(BinStackingTask(env_path, ur10_assets))
world.add_decider_network(behavior.make_decider_network(robot, print_diagnostics))
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 6,687 | Python | 33.474227 | 106 | 0.612233 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/franka_examples_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("franka_examples")
parser.add_argument(
"--behavior",
type=str,
default="block_stacking_behavior",
help="Which behavior to run. See behavior/franka for available behavior files.",
)
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from behaviors.franka.franka_behaviors import ContextStateMonitor, behaviors
from omni.isaac.core.objects import DynamicCuboid, VisualCuboid
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import Behavior, CortexWorld, LogicalStateMonitor
from omni.isaac.cortex.robot import add_franka_to_stage
from omni.isaac.cortex.tools import SteadyRate
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
world = CortexWorld()
context_monitor = ContextStateMonitor(print_dt=0.25)
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
position=np.array([x, -0.4, width / 2]),
)
)
robot.register_obstacle(obj)
world.scene.add_default_ground_plane()
print()
print("loading behavior: {}".format(args.behavior))
print()
if args.behavior in behaviors:
decider_network = behaviors[args.behavior].make_decider_network(robot)
else:
decider_network = load_behavior_module(args.behavior).make_decider_network(robot)
decider_network.context.add_monitor(context_monitor.monitor)
world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,733 | Python | 32.753086 | 90 | 0.678741 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_cortex_sync_sim_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("example_cortex_sync_sim")
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import add_franka_to_stage
enable_extension("omni.isaac.cortex_sync")
from omni.isaac.cortex_sync.cortex_ros import CortexSimObjectsRos, CortexSimRobotRos, cortex_init_ros_node
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
cortex_init_ros_node("example_cortex_sync_sim")
world = CortexWorld()
sim_robot = world.add_robot(
add_franka_to_stage(name="franka_sim", prim_path="/Sim/Franka", use_motion_commander=False)
)
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
sim_objects = {}
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
sim_obj = world.scene.add(
DynamicCuboid(
prim_path="/Sim/Obs/{}".format(spec.name),
name="{}_sim".format(spec.name),
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
sim_objects[spec.name] = sim_obj
world.scene.add_default_ground_plane()
cortex_sim = CortexSimRobotRos(sim_robot)
cortex_sim_objects_ros = CortexSimObjectsRos(sim_objects)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,400 | Python | 31.013333 | 106 | 0.668333 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/follow_example_modified_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core.objects import VisualSphere
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.df import DfNetwork, DfState, DfStateMachineDecider
from omni.isaac.cortex.dfb import DfRobotApiContext
from omni.isaac.cortex.robot import add_franka_to_stage
class FollowState(DfState):
"""The context object is available as self.context. We have access to everything in the context
object, which in this case is everything in the robot object (the command API and the follow
sphere).
"""
@property
def robot(self):
return self.context.robot
@property
def follow_sphere(self):
return self.context.robot.follow_sphere
def enter(self):
self.follow_sphere.set_world_pose(*self.robot.arm.get_fk_pq().as_tuple())
def step(self):
target_position, _ = self.follow_sphere.get_world_pose()
self.robot.arm.send_end_effector(target_position=target_position)
return self # Always transition back to this state.
class FollowContext(DfRobotApiContext):
def __init__(self, robot):
super().__init__(robot)
self.reset()
self.add_monitors(
[FollowContext.monitor_end_effector, FollowContext.monitor_gripper, FollowContext.monitor_diagnostics]
)
def reset(self):
self.is_target_reached = False
def monitor_end_effector(self):
eff_p = self.robot.arm.get_fk_p()
target_p, _ = self.robot.follow_sphere.get_world_pose()
self.is_target_reached = np.linalg.norm(target_p - eff_p) < 0.01
def monitor_gripper(self):
if self.is_target_reached:
self.robot.gripper.close()
else:
self.robot.gripper.open()
def monitor_diagnostics(self):
print("is_target_reached: {}".format(self.is_target_reached))
def main():
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
# Add a sphere to the scene to follow, and store it off in a new member as part of the robot.
robot.follow_sphere = world.scene.add(
VisualSphere(
name="follow_sphere", prim_path="/World/FollowSphere", radius=0.02, color=np.array([0.7, 0.0, 0.7])
)
)
world.scene.add_default_ground_plane()
# Add a simple state machine decider network with the single state defined above. This state
# will be persistently stepped because it always returns itself.
world.add_decider_network(DfNetwork(DfStateMachineDecider(FollowState()), context=FollowContext(robot)))
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 3,228 | Python | 33.351063 | 114 | 0.696406 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/behaviors/franka/franka_behaviors.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.cortex.dfb import DfDiagnosticsMonitor
from omni.isaac.cortex.sample_behaviors.franka import (
block_stacking_behavior,
peck_decider_network,
peck_game,
peck_state_machine,
)
from omni.isaac.cortex.sample_behaviors.franka.simple import simple_decider_network, simple_state_machine
behaviors = {
"block_stacking_behavior": block_stacking_behavior,
"peck_decider_network": peck_decider_network,
"peck_game": peck_game,
"peck_state_machine": peck_state_machine,
"simple_decider_network": simple_decider_network,
"simple_state_machine": simple_state_machine,
}
class ContextStateMonitor(DfDiagnosticsMonitor):
"""
State monitor to read the context and pass it to the UI.
    For these behaviors, the context has a `diagnostics_message` that contains the text to be displayed, and each
behavior implements its own monitor to update that.
"""
def __init__(self, print_dt, diagnostic_fn=None):
super().__init__(print_dt=print_dt)
def print_diagnostics(self, context):
if hasattr(context, "diagnostics_message"):
print("====================================")
print(context.diagnostics_message)
| 1,637 | Python | 37.093022 | 112 | 0.719609 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/jetbot_move.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.robots import WheeledRobot
my_world = World(stage_units_in_meters=1.0)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd"
my_jetbot = my_world.scene.add(
WheeledRobot(
prim_path="/World/Jetbot",
name="my_jetbot",
wheel_dof_names=["left_wheel_joint", "right_wheel_joint"],
create_robot=True,
usd_path=jetbot_asset_path,
position=np.array([0, 0.0, 2.0]),
)
)
my_world.scene.add_default_ground_plane()
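# DifferentialController.forward() expects a command of [linear velocity, angular velocity]
# (assumed to be in m/s and rad/s) and converts it to left/right wheel joint velocities using
# the wheel radius and wheel base given below.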
my_controller = DifferentialController(name="simple_control", wheel_radius=0.03, wheel_base=0.1125)
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
if i >= 0 and i < 1000:
# forward
my_jetbot.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
print(my_jetbot.get_linear_velocity())
elif i >= 1000 and i < 1300:
# rotate
my_jetbot.apply_wheel_actions(my_controller.forward(command=[0.0, np.pi / 12]))
print(my_jetbot.get_angular_velocity())
elif i >= 1300 and i < 2000:
# forward
my_jetbot.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
elif i == 2000:
i = 0
i += 1
if args.test is True:
break
simulation_app.close()
| 2,568 | Python | 34.680555 | 99 | 0.682243 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/stable_baselines_example/eval.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
from env import JetBotEnv
try:
from stable_baselines3 import PPO
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install stable-baselines3 in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install stable-baselines3 "
)
exit()
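# Path to a policy checkpoint; it is assumed to have been produced beforehand by the companion
# training script in this example folder.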
policy_path = "./cnn_policy/jetbot_policy.zip"
my_env = JetBotEnv(headless=False)
model = PPO.load(policy_path)
for _ in range(20):
obs, _ = my_env.reset()
done = False
while not done:
actions, _ = model.predict(observation=obs, deterministic=True)
obs, reward, done, truncated, info = my_env.step(actions)
my_env.render()
my_env.close()
| 1,166 | Python | 30.54054 | 190 | 0.731561 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/stable_baselines_example/env.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
import carb
import gymnasium
import numpy as np
from gymnasium import spaces
class JetBotEnv(gymnasium.Env):
metadata = {"render.modes": ["human"]}
def __init__(
self,
skip_frame=1,
physics_dt=1.0 / 60.0,
rendering_dt=1.0 / 60.0,
max_episode_length=256,
seed=0,
headless=True,
) -> None:
from omni.isaac.kit import SimulationApp
self.headless = headless
self._simulation_app = SimulationApp({"headless": self.headless, "anti_aliasing": 0})
self._skip_frame = skip_frame
self._dt = physics_dt * self._skip_frame
self._max_episode_length = max_episode_length
self._steps_after_reset = int(rendering_dt / physics_dt)
from omni.isaac.core import World
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.robots import WheeledRobot
self._my_world = World(physics_dt=physics_dt, rendering_dt=rendering_dt, stage_units_in_meters=1.0)
self._my_world.scene.add_default_ground_plane()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
return
jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd"
self.jetbot = self._my_world.scene.add(
WheeledRobot(
prim_path="/jetbot",
name="my_jetbot",
wheel_dof_names=["left_wheel_joint", "right_wheel_joint"],
create_robot=True,
usd_path=jetbot_asset_path,
position=np.array([0, 0.0, 0.03]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
self.jetbot_controller = DifferentialController(name="simple_control", wheel_radius=0.0325, wheel_base=0.1125)
self.goal = self._my_world.scene.add(
VisualCuboid(
prim_path="/new_cube_1",
name="visual_cube",
position=np.array([0.60, 0.30, 0.05]),
size=0.1,
color=np.array([1.0, 0, 0]),
)
)
self.seed(seed)
self.reward_range = (-float("inf"), float("inf"))
gymnasium.Env.__init__(self)
self.action_space = spaces.Box(low=-1, high=1.0, shape=(2,), dtype=np.float32)
        self.observation_space = spaces.Box(low=-float("inf"), high=float("inf"), shape=(16,), dtype=np.float32)
self.max_velocity = 1
self.max_angular_velocity = math.pi
self.reset_counter = 0
return
def get_dt(self):
return self._dt
def step(self, action):
previous_jetbot_position, _ = self.jetbot.get_world_pose()
        # action: forward velocity, angular velocity on [-1, 1]
raw_forward = action[0]
raw_angular = action[1]
# we want to force the jetbot to always drive forward
# so we transform to [0,1]. we also scale by our max velocity
forward = (raw_forward + 1.0) / 2.0
forward_velocity = forward * self.max_velocity
# we scale the angular, but leave it on [-1,1] so the
# jetbot can remain an ambiturner.
angular_velocity = raw_angular * self.max_angular_velocity
# we apply our actions to the jetbot
for i in range(self._skip_frame):
self.jetbot.apply_wheel_actions(
self.jetbot_controller.forward(command=[forward_velocity, angular_velocity])
)
self._my_world.step(render=False)
observations = self.get_observations()
info = {}
done = False
truncated = False
if self._my_world.current_time_step_index - self._steps_after_reset >= self._max_episode_length:
done = True
truncated = True
goal_world_position, _ = self.goal.get_world_pose()
current_jetbot_position, _ = self.jetbot.get_world_pose()
previous_dist_to_goal = np.linalg.norm(goal_world_position - previous_jetbot_position)
current_dist_to_goal = np.linalg.norm(goal_world_position - current_jetbot_position)
reward = previous_dist_to_goal - current_dist_to_goal
if current_dist_to_goal < 0.1:
done = True
return observations, reward, done, truncated, info
def reset(self, seed=None):
self._my_world.reset()
self.reset_counter = 0
# randomize goal location in circle around robot
alpha = 2 * math.pi * np.random.rand()
r = 1.00 * math.sqrt(np.random.rand()) + 0.20
self.goal.set_world_pose(np.array([math.sin(alpha) * r, math.cos(alpha) * r, 0.05]))
observations = self.get_observations()
return observations, {}
def get_observations(self):
self._my_world.render()
jetbot_world_position, jetbot_world_orientation = self.jetbot.get_world_pose()
jetbot_linear_velocity = self.jetbot.get_linear_velocity()
jetbot_angular_velocity = self.jetbot.get_angular_velocity()
goal_world_position, _ = self.goal.get_world_pose()
obs = np.concatenate(
[
jetbot_world_position,
jetbot_world_orientation,
jetbot_linear_velocity,
jetbot_angular_velocity,
goal_world_position,
]
)
return obs
def render(self, mode="human"):
return
def close(self):
self._simulation_app.close()
return
def seed(self, seed=None):
self.np_random, seed = gymnasium.utils.seeding.np_random(seed)
np.random.seed(seed)
return [seed]
| 6,310 | Python | 38.198758 | 118 | 0.606339 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/stable_baselines_example/train.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
import torch as th
from env import JetBotEnv
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
log_dir = "./cnn_policy"
# set headless to False to visualize training
my_env = JetBotEnv(headless=True)
# in test mode we manually install sb3
if args.test is True:
import omni.kit.pipapi
omni.kit.pipapi.install("stable-baselines3==2.0.0", module="stable_baselines3")
omni.kit.pipapi.install("tensorboard")
# import stable baselines
try:
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3.ppo import MlpPolicy
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install stable-baselines3 in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install stable-baselines3"
)
exit()
try:
import tensorboard
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install tensorboard in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install tensorboard"
)
exit()
policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[dict(vf=[128, 128, 128], pi=[128, 128, 128])])
policy = MlpPolicy
total_timesteps = 500000
if args.test is True:
total_timesteps = 10000
checkpoint_callback = CheckpointCallback(save_freq=10000, save_path=log_dir, name_prefix="jetbot_policy_checkpoint")
model = PPO(
policy,
my_env,
policy_kwargs=policy_kwargs,
verbose=1,
n_steps=2560,
batch_size=64,
learning_rate=0.000125,
gamma=0.9,
ent_coef=7.5e-08,
clip_range=0.3,
n_epochs=5,
gae_lambda=1.0,
max_grad_norm=0.9,
vf_coef=0.95,
device="cuda:0",
tensorboard_log=log_dir,
)
model.learn(total_timesteps=total_timesteps, callback=[checkpoint_callback])
model.save(log_dir + "/jetbot_policy")
my_env.close()
| 2,533 | Python | 29.53012 | 189 | 0.730754 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ocs2/franka_arm_ocs2.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Launch Omniverse Toolkit first."""
# kit
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
"""Rest everything follows."""
# python
import os
import numpy as np
from omni.isaac.core.objects.sphere import VisualSphere
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.viewports import set_camera_view
# isaac-core
from omni.isaac.core.world import World
# isaac-franka
from omni.isaac.franka import Franka
# isaac-ocs2
enable_extension("omni.isaac.ocs2")
from omni.isaac.ocs2.end_effector_pose_tracking_mpc import EndEffectorPoseTrackingMpc
# print settings
np.set_printoptions(formatter={"float_kind": "{:.2f}".format})
def main():
"""Sets the Franka control mode to "velocity" and tests the MPC."""
# Add MPC
config = {
"urdf_path": "data/franka/urdf/panda.urdf",
"lib_folder": "/tmp/ocs2/auto_generated/franka",
"mpc_config_path": "data/franka/mpc/task.info",
}
mpc_interface = EndEffectorPoseTrackingMpc(config["mpc_config_path"], config["lib_folder"], config["urdf_path"])
# Receive the number of arm dimensions
arm_num_dof = mpc_interface.state_dim
# print info about MPC
print(mpc_interface)
# Load kit helper
my_world = World(stage_units_in_meters=1.0, physics_dt=0.01)
# Set main camera
set_camera_view([1.5, 1.5, 1.5], [0.0, 0.0, 0.0])
# Spawn things into stage
# -- ground
my_world.scene.add_default_ground_plane()
# -- robot
robot = my_world.scene.add(Franka("/World/Robot"))
# -- markers
goal_vis_prim = my_world.scene.add(
VisualSphere("/World/Vis/ee_goal", name="ee_goal", radius=0.01, color=np.asarray([1.0, 0.0, 0.0]))
)
ee_vis_prim = my_world.scene.add(
VisualSphere("/World/Vis/ee_curr", name="ee_curr", radius=0.01, color=np.asarray([0.0, 0.0, 1.0]))
)
# Play the simulator
my_world.reset()
# Set control mode
robot._articulation_view.switch_control_mode("velocity")
robot.disable_gravity()
# Now we are ready!
print("[INFO]: Setup complete...")
# Define simulation stepping
dt = 0.01
sim_time = 0.0
# Define goals for the arm
ee_goal_index = 0
ee_goals = [
[0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
[0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
[0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
]
# Define a goal for the arm
ee_goal_pose = np.array(ee_goals[ee_goal_index])
# Obtain measurements
arm_joint_pos = robot.get_joint_positions()[:arm_num_dof]
ee_curr_pose = robot.end_effector.get_world_pose()
ee_curr_pose = np.concatenate((ee_curr_pose[0], ee_curr_pose[1]), axis=0)
# Update visualization
goal_vis_prim.set_world_pose(ee_goal_pose[:3], ee_goal_pose[3:])
ee_vis_prim.set_world_pose(ee_curr_pose[:3], ee_curr_pose[3:])
# Define target trajectory
mpc_interface.set_target_trajectory(
time_traj=[sim_time, sim_time + 2], state_traj=[ee_curr_pose, ee_goal_pose], input_traj=[None, None]
)
# Reset the MPC
mpc_interface.reset(sim_time, arm_joint_pos)
# Simulate physics
for count in range(100000):
# obtain current measurements
arm_joint_pos = robot.get_joint_positions()[:arm_num_dof]
# compute arm's optimal control command
arm_cmd = mpc_interface.advance(sim_time, arm_joint_pos)
# print mpc cost
# perform actions
action = ArticulationAction(joint_velocities=arm_cmd, joint_indices=[range(arm_num_dof)])
robot.apply_action(action)
# perform step
my_world.step()
# update sim-time
sim_time += dt
# obtain new measurements
ee_curr_pose = robot.end_effector.get_world_pose()
ee_curr_pose = np.concatenate((ee_curr_pose[0], ee_curr_pose[1]), axis=0)
# compute the waypoint error
error = np.linalg.norm(ee_curr_pose[:3] - ee_goal_pose[:3])
# update visualization
ee_vis_prim.set_world_pose(ee_curr_pose[:3], ee_curr_pose[3:])
# get next waypoint
if error < 0.014:
# print goal state
print(
f"\tMPC cost: { mpc_interface.get_current_cost()}\n",
f"\tCurrent EE state:\n"
f"\t\tI_r_IE : {ee_curr_pose[:3]} \n"
f"\t\tq_IE : {ee_curr_pose[3:]} \n"
f"\tGoal EE state:\n"
f"\t\tI_r_IE_des: {ee_goals[ee_goal_index][:3]} \n"
f"\t\tq_IE_des : {ee_goals[ee_goal_index][3:]} \n"
"----------------------------------------------",
)
# next goal
ee_goal_index += 1
if ee_goal_index >= len(ee_goals):
ee_goal_index = 0
# Define a goal for the arm
ee_goal_pose = np.array(ee_goals[ee_goal_index])
# Update prims
goal_vis_prim.set_world_pose(ee_goal_pose[:3], ee_goal_pose[3:])
# Define target trajectory
mpc_interface.set_target_trajectory(
time_traj=[sim_time, sim_time + 2], state_traj=[ee_curr_pose, ee_goal_pose], input_traj=[None, None]
)
if __name__ == "__main__":
# Run OCS2 example with Franka
main()
# Close the simulator
simulation_app.close()
# EOF
| 5,836 | Python | 34.16265 | 116 | 0.61292 |
2820207922/isaac_ws/standalone_examples/api/omni.importer.urdf/urdf_import.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
# URDF import, configuration and simulation sample
kit = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni.kit.commands
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.extensions import get_extension_path_from_name
from pxr import Gf, PhysxSchema, Sdf, UsdLux, UsdPhysics
# Setting up import configuration:
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = False
import_config.convex_decomp = False
import_config.import_inertia_tensor = True
import_config.fix_base = False
import_config.distance_scale = 100
# Get path to extension data:
extension_path = get_extension_path_from_name("omni.importer.urdf")
# Import URDF; stage_path contains the path to the usd prim in the stage.
status, stage_path = omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=extension_path + "/data/urdf/robots/carter/urdf/carter.urdf",
import_config=import_config,
get_articulation_root=True,
)
# Get stage handle
stage = omni.usd.get_context().get_stage()
# Enable physics
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
# Set gravity
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(9.81)
# Set solver settings
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Add ground plane
omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, -50),
color=Gf.Vec3f(0.5),
)
# Add lighting
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
# Get handle to the Drive API for both wheels
left_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/left_wheel"), "angular")
right_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/right_wheel"), "angular")
# Set the velocity drive target in degrees/second
left_wheel_drive.GetTargetVelocityAttr().Set(150)
right_wheel_drive.GetTargetVelocityAttr().Set(150)
# Set the drive damping, which controls the strength of the velocity drive
left_wheel_drive.GetDampingAttr().Set(15000)
right_wheel_drive.GetDampingAttr().Set(15000)
# Set the drive stiffness, which controls the strength of the position drive
# In this case because we want to do velocity control this should be set to zero
left_wheel_drive.GetStiffnessAttr().Set(0)
right_wheel_drive.GetStiffnessAttr().Set(0)
# Start simulation
omni.timeline.get_timeline_interface().play()
# perform one simulation step so physics is loaded and dynamic control works.
kit.update()
art = Articulation(prim_path=stage_path)
art.initialize()
if not art.handles_initialized:
print(f"{stage_path} is not an articulation")
else:
print(f"Got articulation {stage_path} with handle {art.articulation_handle}")
# perform simulation
for frame in range(100):
kit.update()
# Shutdown and exit
omni.timeline.get_timeline_interface().stop()
kit.close()
| 3,877 | Python | 36.288461 | 111 | 0.780242 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/clock.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import time
import carb
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser(description="ROS Clock Example")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
# Example ROS bridge sample showing rospy and rosclock interaction
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
if args.test:
from omni.isaac.ros_bridge.scripts.roscore import Roscore
from omni.isaac.ros_bridge.tests.common import wait_for_rosmaster
roscore = Roscore()
wait_for_rosmaster()
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
import rospy
# Note that this is not the system level rospy, but one compiled for omniverse
from rosgraph_msgs.msg import Clock
clock_topic = "sim_time"
manual_clock_topic = "manual_time"
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
# Creating a action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
("PublishClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("PublishManualClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
],
og.Controller.Keys.CONNECT: [
# Connecting execution of OnPlaybackTick node to PublishClock to automatically publish each frame
("OnPlaybackTick.outputs:tick", "PublishClock.inputs:execIn"),
# Connecting execution of OnImpulseEvent node to PublishManualClock so it will only publish when an impulse event is triggered
("OnImpulseEvent.outputs:execOut", "PublishManualClock.inputs:execIn"),
# Connecting simulationTime data of ReadSimTime to the clock publisher nodes
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishManualClock.inputs:timeStamp"),
],
og.Controller.Keys.SET_VALUES: [
# Assigning topic names to clock publishers
("PublishClock.inputs:topicName", clock_topic),
("PublishManualClock.inputs:topicName", manual_clock_topic),
],
},
)
except Exception as e:
print(e)
simulation_app.update()
simulation_app.update()
# Define ROS callbacks
def sim_clock_callback(data):
print("sim time:", data.clock.to_sec())
def manual_clock_callback(data):
print("manual stepped sim time:", data.clock.to_sec())
# Create rospy node
rospy.init_node("isaac_sim_clock", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
# create subscribers
sim_clock_sub = rospy.Subscriber(clock_topic, Clock, sim_clock_callback)
manual_clock_sub = rospy.Subscriber(manual_clock_topic, Clock, manual_clock_callback)
time.sleep(1.0)
# need to initialize physics getting any articulation..etc
simulation_context.initialize_physics()
simulation_context.play()
# perform a fixed number of steps with fixed step size
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_context.render() # This updates rendering/app loop which calls the sim clock
simulation_context.step(render=False) # runs with a non-realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# perform a fixed number of steps with realtime clock
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_app.update() # runs with a realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# cleanup and shutdown
sim_clock_sub.unregister()
manual_clock_sub.unregister()
simulation_context.stop()
if args.test:
roscore = None
simulation_app.close()
| 5,492 | Python | 35.865772 | 142 | 0.716497 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/carter_stereo.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser(description="Carter Stereo Example")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
# Example ROS bridge sample showing manual control over messages
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.nucleus import get_assets_root_path
from pxr import Sdf
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# Locate assets root folder to load sample
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
exit()
usd_path = assets_root_path + "/Isaac/Samples/ROS/Scenario/carter_warehouse_navigation.usd"
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
simulation_app.update()
simulation_app.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
simulation_app.update()
print("Loading Complete")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
ros_cameras_graph_path = "/World/Carter_ROS/ROS_Cameras"
# Enabling rgb and depth image publishers for left camera. Cameras will automatically publish images each frame
og.Controller.set(
og.Controller.attribute(ros_cameras_graph_path + "/isaac_create_render_product_left.inputs:enabled"), True
)
# Enabling rgb and depth image publishers for right camera. Cameras will automatically publish images each frame
og.Controller.set(
og.Controller.attribute(ros_cameras_graph_path + "/isaac_create_render_product_right.inputs:enabled"), True
)
simulation_context.play()
simulation_context.step()
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
simulation_context.step()
# Dock the second camera window
left_viewport = omni.ui.Workspace.get_window("Viewport")
right_viewport = omni.ui.Workspace.get_window("Viewport 2")
if right_viewport is not None and left_viewport is not None:
right_viewport.dock_in(left_viewport, omni.ui.DockPosition.RIGHT)
right_viewport = None
left_viewport = None
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
import rospy
# Create a rostopic to publish messages that spin the robot in place
# Note that this is not the system level rospy, but one compiled for omniverse
from geometry_msgs.msg import Twist
rospy.init_node("carter_stereo", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
pub = rospy.Publisher("cmd_vel", Twist, queue_size=10)
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Publish the ROS Twist message every 2 frames
if frame % 2 == 0:
message = Twist()
message.angular.z = 0.5 # spin in place
pub.publish(message)
if args.test and frame > 120:
break
frame = frame + 1
pub.unregister()
rospy.signal_shutdown("carter_stereo complete")
simulation_context.stop()
simulation_app.close()
| 4,162 | Python | 32.039682 | 112 | 0.760692 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/contact.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni
import omni.kit.commands
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.sensor import _sensor
from pxr import Gf
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# Note that this is not the system level rospy, but one compiled for omniverse
import numpy as np
import rospy
try:
from isaac_tutorials.msg import ContactSensor
except ModuleNotFoundError:
carb.log_error("isaac_tutorials message definition was not found, please source the ros workspace")
simulation_app.close()
exit()
rospy.init_node("contact_sample", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
timeline = omni.timeline.get_timeline_interface()
contact_pub = rospy.Publisher("/contact_report", ContactSensor, queue_size=0)
cs = _sensor.acquire_contact_sensor_interface()
meters_per_unit = 1.0
ros_world = World(stage_units_in_meters=1.0)
# add a cube in the world
cube_path = "/cube"
cube_1 = ros_world.scene.add(
DynamicCuboid(prim_path=cube_path, name="cube_1", position=np.array([0, 0, 1.5]), size=1.0)
)
simulation_app.update()
# Add a plane for cube to collide with
ros_world.scene.add_default_ground_plane()
simulation_app.update()
# putting contact sensor in the ContactSensor Message format
def format_contact(c_out, contact):
c_out.time = float(contact["time"])
c_out.value = float(contact["value"] * meters_per_unit)
c_out.in_contact = bool(contact["inContact"])
return c_out
# Setup contact sensor on cube
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateContactSensor",
path="/Contact_Sensor",
parent=cube_path,
min_threshold=0,
max_threshold=100000000,
color=Gf.Vec4f(1, 1, 1, 1),
radius=-1,
sensor_period=1.0 / 60.0,
translation=Gf.Vec3d(0, 0, 0),
)
simulation_app.update()
# initiate the message handle
c_out = ContactSensor()
# start simulation
timeline.play()
for frame in range(10000):
ros_world.step(render=False)
# Get processed contact data
reading = cs.get_sensor_readings(cube_path + "/Contact_Sensor")
if reading.shape[0]:
for r in reading:
print(r)
# pack the raw data into ContactSensor format and publish it
c = format_contact(c_out, r)
contact_pub.publish(c)
# Cleanup
timeline.stop()
contact_pub.unregister()
rospy.signal_shutdown("contact_sample complete")
simulation_app.close()
| 3,408 | Python | 28.136752 | 103 | 0.734742 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/camera_noise.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
CAMERA_STAGE_PATH = "/Camera"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
simulation_app = SimulationApp(CONFIG)
import numpy as np
import omni
import omni.graph.core as og
import omni.replicator.core as rep
import omni.syntheticdata._syntheticdata as sd
import warp as wp
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.isaac.core.utils.render_product import set_camera_prim_path
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom
# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(CAMERA_STAGE_PATH, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(-1, 5, 1))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
# grab our render product and directly set the camera prim
render_product_path = get_active_viewport().get_render_product_path()
set_camera_prim_path(render_product_path, CAMERA_STAGE_PATH)
# GPU Noise Kernel for illustrative purposes, input is rgba, outputs rgb
@wp.kernel
def image_gaussian_noise_warp(
data_in: wp.array3d(dtype=wp.uint8), data_out: wp.array3d(dtype=wp.uint8), seed: int, sigma: float = 0.5
):
i, j = wp.tid()
dim_i = data_out.shape[0]
dim_j = data_out.shape[1]
pixel_id = i * dim_i + j
state_r = wp.rand_init(seed, pixel_id + (dim_i * dim_j * 0))
state_g = wp.rand_init(seed, pixel_id + (dim_i * dim_j * 1))
state_b = wp.rand_init(seed, pixel_id + (dim_i * dim_j * 2))
data_out[i, j, 0] = wp.uint8(float(data_in[i, j, 0]) + (255.0 * sigma * wp.randn(state_r)))
data_out[i, j, 1] = wp.uint8(float(data_in[i, j, 1]) + (255.0 * sigma * wp.randn(state_g)))
data_out[i, j, 2] = wp.uint8(float(data_in[i, j, 2]) + (255.0 * sigma * wp.randn(state_b)))
# register a new augmented annotator that adds noise to the rgba image and outputs rgb so the ROS publisher can publish it
rep.annotators.register(
name="rgb_gaussian_noise",
annotator=rep.annotators.augment_compose(
source_annotator=rep.annotators.get("rgb", device="cuda"),
augmentations=[
rep.annotators.Augmentation.from_function(
image_gaussian_noise_warp, sigma=0.1, seed=1234, data_out_shape=(-1, -1, 3)
),
],
),
)
# Create a new writer with the augmented image
rep.writers.register_node_writer(
name=f"CustomROS1PublishImage",
node_type_id="omni.isaac.ros_bridge.ROS1PublishImage",
annotators=[
"rgb_gaussian_noise",
omni.syntheticdata.SyntheticData.NodeConnectionTemplate(
"IsaacReadSimulationTime", attributes_mapping={"outputs:simulationTime": "inputs:timeStamp"}
),
],
category="custom",
)
# Create the new writer and attach to our render product
writer = rep.writers.get(f"CustomROS1PublishImage")
writer.initialize(topicName="rgb_augmented", frameId="sim_camera")
writer.attach([render_product_path])
simulation_app.update()
# Need to initialize physics getting any articulation..etc
simulation_context.initialize_physics()
simulation_context.play()
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
if simulation_context.is_playing():
# Rotate camera by 0.5 degree every frame
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
frame = frame + 1
simulation_context.stop()
simulation_app.close()
| 5,338 | Python | 35.319728 | 115 | 0.729299 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/subscriber.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
from omni.isaac.core import World
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.core.utils.extensions import enable_extension
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
import time
# Note that this is not the system level rospy, but one compiled for omniverse
import numpy as np
import rospy
from std_msgs.msg import Empty
class Subscriber:
def __init__(self):
# setting up the world with a cube
self.timeline = omni.timeline.get_timeline_interface()
self.ros_world = World(stage_units_in_meters=1.0)
self.ros_world.scene.add_default_ground_plane()
# add a cube in the world
cube_path = "/cube"
self.ros_world.scene.add(
VisualCuboid(prim_path=cube_path, name="cube_1", position=np.array([0, 0, 10]), size=0.2)
)
self._cube_position = np.array([0, 0, 0])
# setup the ros subscriber here
self.ros_sub = rospy.Subscriber("/move_cube", Empty, self.move_cube_callback, queue_size=10)
self.ros_world.reset()
def move_cube_callback(self, data):
# callback function to set the cube position to a new one upon receiving a (empty) ros message
if self.ros_world.is_playing():
self._cube_position = np.array([np.random.rand() * 0.40, np.random.rand() * 0.40, 0.10])
def run_simulation(self):
self.timeline.play()
while simulation_app.is_running():
self.ros_world.step(render=True)
if self.ros_world.is_playing():
if self.ros_world.current_time_step_index == 0:
self.ros_world.reset()
# the actual setting the cube pose is done here
self.ros_world.scene.get_object("cube_1").set_world_pose(self._cube_position)
# Cleanup
self.ros_sub.unregister()
rospy.signal_shutdown("subscriber example complete")
self.timeline.stop()
simulation_app.close()
if __name__ == "__main__":
rospy.init_node("tutorial_subscriber", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
subscriber = Subscriber()
subscriber.run_simulation()
| 3,108 | Python | 34.329545 | 103 | 0.684685 |
codemaster0407/ICECTCI-Hackathon/README.md |
# ICECTCI-Hackathon
# PROBLEM STATEMENT
Problem Statement 3 – Natural Language Processing
Title: AI-Assisted Learning for NVIDIA SDKs and Toolkits
Problem Statement: Develop an AI-powered language model (LLM) that assists users in
understanding and effectively using various NVIDIA SDKs (Software Development Kits) and
toolkits. The objective of this hackathon is to create an interactive and user-friendly platform
that provides comprehensive information, examples, and guidance on NVIDIA's SDKs and
toolkits. By leveraging the power of language models and NVIDIA toolkits, participants aim to
simplify the learning curve for developers and empower them to utilize NVIDIA's technologies
more efficiently.
### Chatbot_final.ipynb
This notebook can be used for inference on queries as per the user's interest.
### Evaluate_1.ipynb
Notebook to evaluate the fine-tuned models.
### FALCON7B_r32_a64_gen_tot
This directory contains the finetuned Falcon-7B LLM with PEFT adapters.
### pup_gorilla_model
This directory contains the finetuned Gorilla-7B LLM with PEFT adapters.
### DATA_EXTRACTION
This directory contains all the code and extracted data files from different sources.
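### Loading a fine-tuned model (example)
The fine-tuned adapters above can be loaded on top of their base model with `peft` for inference. A minimal sketch, assuming `tiiuae/falcon-7b` as the base checkpoint and default generation settings (both assumptions, not taken from the notebooks):
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_name = "tiiuae/falcon-7b"      # assumed base checkpoint
adapter_dir = "FALCON7B_r32_a64_gen_tot"  # PEFT adapter directory from this repo

tokenizer = AutoTokenizer.from_pretrained(base_model_name)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name, load_in_8bit=True, device_map="auto", trust_remote_code=True
)
model = PeftModel.from_pretrained(base_model, adapter_dir)
model.eval()

prompt = "How do I build a TensorRT engine from an ONNX model?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```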
| 1,197 | Markdown | 35.303029 | 97 | 0.802005 |
codemaster0407/ICECTCI-Hackathon/FALCON7B_r32_a64_gen_tot/README.md | ---
library_name: peft
---
## Training procedure
The following `bitsandbytes` quantization config was used during training:
- load_in_8bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
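In code, this corresponds to a `BitsAndBytesConfig` along the lines of the sketch below (a sketch only; the base model name is an assumption):
```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Reconstruct the quantization settings listed above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
)

# Load the (assumed) base model with that config before attaching the adapter.
model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b",
    quantization_config=bnb_config,
    device_map="auto",
)
```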
### Framework versions
- PEFT 0.5.0
| 491 | Markdown | 20.391303 | 74 | 0.735234 |
KhaledSharif/omniverse-gym/setup.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from setuptools import setup, find_packages
INSTALL_REQUIRES = [
"numpy==1.23.5",
"protobuf==3.20.2",
"omegaconf==2.3.0",
"hydra-core==1.3.2",
"urllib3==1.26.16",
"rl-games==1.6.1",
"moviepy==1.0.3"
]
setup(
name="omniisaacgymenvs",
author="[email protected]",
version="1.0.0",
description="Omniverse Isaac Gym Envs for Robot Learning in NVIDIA Isaac Sim",
keywords=["robotics", "rl"],
include_package_data=True,
install_requires=INSTALL_REQUIRES,
packages=find_packages("."),
classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.7, 3.8"],
zip_safe=False,
)
| 765 | Python | 25.413792 | 94 | 0.647059 |
KhaledSharif/omniverse-gym/run.py | import os
import gym
import hydra
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import (
retrieve_checkpoint_path,
get_experience,
)
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
# `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register(
"RLGPU",
lambda config_name, num_actors, **kwargs: RLGPUEnv(
config_name, num_actors, **kwargs
),
)
env_configurations.register(
"rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env}
)
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self, module_path, experiment_dir):
self.rlg_config_dict["params"]["config"]["train_dir"] = os.path.join(
module_path, "runs"
)
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
# dump config dict
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run(
{
"train": not self.cfg.test,
"play": self.cfg.test,
"checkpoint": self.cfg.checkpoint,
"sigma": None,
}
)
@hydra.main(version_base=None, config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
headless = cfg.headless
# local rank (GPU id) in a current multi-gpu mode
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# global rank (GPU id) in multi-gpu multi-node mode
global_rank = int(os.getenv("RANK", "0"))
if cfg.multi_gpu:
cfg.device_id = local_rank
cfg.rl_device = f"cuda:{local_rank}"
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
# select kit app file
experience = get_experience(
headless,
cfg.enable_livestream,
enable_viewport,
cfg.enable_recording,
cfg.kit_app,
)
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport or cfg.enable_recording,
experience=experience,
)
# parse experiment directory
module_path = os.path.abspath(os.curdir)
experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name)
# use gym RecordVideo wrapper for viewport recording
if cfg.enable_recording:
if cfg.recording_dir == "":
videos_dir = os.path.join(experiment_dir, "videos")
else:
videos_dir = cfg.recording_dir
video_interval = lambda step: step % cfg.recording_interval == 0
video_length = cfg.recording_length
env.is_vector_env = True
if env.metadata is None:
env.metadata = {
"render_modes": ["rgb_array"],
"render_fps": cfg.recording_fps,
}
else:
env.metadata["render_modes"] = ["rgb_array"]
env.metadata["render_fps"] = cfg.recording_fps
env = gym.wrappers.RecordVideo(
env,
video_folder=videos_dir,
step_trigger=video_interval,
video_length=video_length,
)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = cfg.seed + global_rank if cfg.seed != -1 else cfg.seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
initialize_task(cfg_dict, env)
torch.cuda.set_device(local_rank)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run(module_path, experiment_dir)
env.close()
if __name__ == "__main__":
parse_hydra_configs()
| 5,048 | Python | 32.437086 | 116 | 0.627377 |
KhaledSharif/omniverse-gym/README.md | # omniverse-gym
Examples of how to use NVIDIA Omniverse Isaac Sim to solve reinforcement learning tasks with RL-Games
## Installation
Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release (2023.1.1)
To install `omniisaacgymenvs`, first clone this repository:
```bash
git clone https://github.com/KhaledSharif/omniverse-gym.git
```
Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`.
To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path. For Linux:
```bash
alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-2023.1.1/python.sh
```
Install the repository and its dependencies:
```bash
PYTHON_PATH -m pip install -e .
```
To run a simple form of PPO from `rl_games`, use the single-threaded training script:
```bash
PYTHON_PATH run.py task=Cartpole
```
The result is saved to the current working directory in a new directory called `runs`.
You can now evaluate your model by running the same environment in test (inference) mode using the saved model checkpoint.
```bash
PYTHON_PATH run.py task=Cartpole test=True checkpoint=runs/Cartpole/nn/Cartpole.pth
```
| 1,491 | Markdown | 35.390243 | 226 | 0.768612 |
KhaledSharif/omniverse-gym/cfg/README.md | ## Reinforcement Learning Configuration
### What is Hydra?
Hydra is an open-source Python framework that simplifies the development of research and other complex applications. The key feature is the ability to dynamically create a hierarchical configuration by composition and override it through config files and the command line.
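As a minimal sketch of the pattern (the config names and fields here are illustrative; `run.py` in this repository follows the same structure):
```python
import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(version_base=None, config_name="config", config_path=".")
def my_app(cfg: DictConfig) -> None:
    # Print the fully composed hierarchy as YAML.
    print(OmegaConf.to_yaml(cfg))
    # Any value can be overridden from the command line, e.g.
    #   python my_app.py task=Cartpole headless=True seed=42
    print(cfg.task, cfg.seed)

if __name__ == "__main__":
    my_app()
```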
### What is ./config.yaml?
- Task Configuration: This section specifies the task name, experiment name, the number of environments to use for training, the random seed, and whether to use deterministic PyTorch operations.
- Device Configuration: This section configures the physics engine (PhysX), the pipeline (CPU or GPU), the device to be used for simulation (CPU or GPU), the device for running the RL algorithm, and whether to enable multi-GPU training.
- PhysX Arguments: This section sets the number of worker threads and the solver type for the PhysX physics engine.
- RL Training Arguments: These arguments control various aspects of the RL training process, such as running in test mode, loading a checkpoint, evaluation mode, headless rendering, live streaming, timeout settings, recording settings (e.g., interval, length, FPS, directory), and wandb (Weights & Biases) integration for logging and monitoring.
- Default Settings: This section sets the default task and training configuration based on the specified task (in this case, Cartpole).
- Hydra Configuration: This section configures the output directory for the training logs and results using the Hydra configuration management framework.
### What is ./task/*.yaml?
- Environment Settings: This section defines the number of parallel environments, episode length, observation and action clipping, control frequency, noise in initial conditions, number of props, aggregation mode, and reward scales for different objectives (e.g., distance, rotation, finger positions).
- Simulation Settings: This section configures the simulation parameters, such as time step, gravity, ground plane, lighting, fabric usage, and whether to use GPU acceleration. It also sets the default physics material properties (friction, restitution).
- Physics Engine Settings: These settings are specific to the PhysX physics engine, including worker thread count, solver type, GPU usage, solver iteration counts, contact offsets, bounce thresholds, friction parameters, sleeping and stabilization settings, and GPU buffer capacities.
- Object-Specific Settings: These sections override specific parameters for individual objects or actors within the environment, such as the robot arm (e.g., Franka), cabinets, and props. These settings include enabling self-collisions, gyroscopic forces, solver iteration counts, sleep and stabilization thresholds, density, maximum depenetration velocity, and shape-specific parameters like contact and rest offsets.
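The sketch below illustrates how a task typically consumes these values once the YAML has been composed into a dictionary; the exact key names vary per task and are assumptions here:
```python
# Hypothetical reader for a composed task config dictionary.
def read_task_settings(task_cfg: dict) -> dict:
    env_cfg = task_cfg["env"]
    sim_cfg = task_cfg["sim"]
    return {
        "num_envs": env_cfg["numEnvs"],              # parallel environments
        "episode_length": env_cfg["episodeLength"],  # max steps per episode
        "dt": sim_cfg["dt"],                         # physics time step
        "use_gpu_pipeline": sim_cfg.get("use_gpu_pipeline", True),
    }
```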
### What is ./train/*.yaml?
Params: This section contains the main parameters for the RL algorithm and neural network architecture.
- seed: Random seed value for reproducibility.
- algo: The algorithm to be used, in this case, a2c_continuous (Advantage Actor-Critic for continuous actions).
- model: The model type, typically continuous_a2c_logstd for continuous action spaces.
- network: Configuration for the neural network architecture, including the type (actor-critic), activation functions, initialization methods, and layer sizes.
Load Checkpoint: Parameters related to loading a pre-trained model checkpoint.
- load_checkpoint: A flag to determine whether to load a checkpoint or not.
- load_path: The path to the checkpoint file to be loaded.
Config: This section contains various configuration settings for the training process.
- name: The name of the experiment or environment.
- full_experiment_name: The full name of the experiment.
- env_name: The name of the environment to be used (in this case, rlgpu).
- device: The device to be used for training (e.g., CPU or GPU).
- multi_gpu: A flag to enable multi-GPU training.
- ppo: A flag to indicate that PPO is being used.
- mixed_precision: A flag to enable mixed-precision training (useful for GPU acceleration).
- normalize_input, normalize_value, normalize_advantage: Flags for normalizing input, value, and advantage estimates.
- num_actors: The number of parallel environments to run.
- reward_shaper: Configuration for reward scaling.
- gamma, tau: Discount factors for future rewards.
- learning_rate, lr_schedule: Learning rate and its scheduling strategy.
- kl_threshold: The KL divergence threshold for adaptive KL penalty in PPO.
- score_to_win: The target score to consider the task as solved.
- max_epochs, save_best_after, save_frequency: Parameters for training duration and checkpointing.
- grad_norm, entropy_coef, truncate_grads, e_clip: Gradient-related parameters and entropy regularization.
- horizon_length, minibatch_size, mini_epochs: Parameters for batching and optimization.
- critic_coef, clip_value, seq_length, bounds_loss_coef: Additional parameters for the critic and bounding loss. | 5,022 | Markdown | 85.603447 | 418 | 0.797093 |
KhaledSharif/omniverse-gym/omniisaacgymenvs/README.md | ## Omniverse Isaac Gym Envs
TBD
| 33 | Markdown | 7.499998 | 27 | 0.727273 |
KhaledSharif/omniverse-gym/omniisaacgymenvs/envs/README.md | ## Reinforcement Learning Environments
```python
class VecEnvBase(
headless: bool,
sim_device: int = 0,
enable_livestream: bool = False,
enable_viewport: bool = False,
launch_simulation_app: bool = True,
experience: Optional[str] = None
)
```
- VecEnvBase: A base class that provides an interface for connecting RL policies with task implementations, following the gym.Env interface. It handles simulation initialization, task registration, and basic environment interactions like reset, step, and render.
- TaskStopException: An exception class used to signal task termination.
- TrainerMT: An abstract base class for controlling the start and stop of an RL policy.
- VecEnvMT: A multi-threaded environment wrapper that separates the RL policy execution and simulation tasks into different threads, enabling interaction with the UI during RL training. It uses message queues for inter-thread communication, managing the flow of actions from the policy to the task, and data from the task to the policy.
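A minimal usage sketch of the single-threaded flow (attribute names are illustrative, and a task must be registered on the environment, e.g. via `initialize_task`, before stepping):
```python
import torch
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames

env = VecEnvRLGames(headless=True)
# ... create the task and register it here, e.g. initialize_task(cfg_dict, env) ...

obs = env.reset()
for _ in range(100):
    # Zero actions, just to illustrate the step interface.
    actions = torch.zeros((env.num_envs, env.action_space.shape[0]), device=env.device)
    obs, rewards, dones, info = env.step(actions)
env.close()
```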
The code provides methods for setting up the simulation environment, initializing tasks, stepping through the simulation, rendering, resetting the environment, and handling actions and observations. It supports both single-threaded and multi-threaded modes of operation, with the multi-threaded mode allowing for UI interaction during training. | 1,380 | Markdown | 71.684207 | 344 | 0.791304 |
KhaledSharif/omniverse-gym/omniisaacgymenvs/robots/README.md | ## Reinforcement Learning Robots
The Robot class provides a common interface for creating and managing robot articulations in the Isaac Sim simulation environment. It allows for configuring various properties, such as position, orientation, scale, visibility, and articulation controller. It also provides methods for applying actions (joint positions, velocities, and efforts), applying visual materials, handling gravity, and retrieving information about the robot's state, articulation bodies, and applied actions.
### Constructor Parameters
- prim_path: The path of the Prim (Primitive) to encapsulate or create.
- name: A shortname to be used as a key by the Scene class. It needs to be unique if the object is added to the Scene.
- position: The position in the world frame of the Prim. Shape is (3,).
- translation: The translation in the local frame of the Prim (with respect to its parent Prim). Shape is (3,).
- orientation: The quaternion orientation in the world/local frame of the Prim (depends on whether translation or position is specified). The quaternion is scalar-first (w, x, y, z). Shape is (4,).
- scale: The local scale to be applied to the Prim's dimensions. Shape is (3,).
- visible: A boolean indicating whether the Prim should be visible in the stage during rendering.
- articulation_controller: A custom ArticulationController that inherits from the base ArticulationController class. If not provided, a basic ArticulationController is created.
### Properties
- articulation_handle: A handler to the articulation, which is a unique identifier used by the Dynamic Control extension to manage the articulation.
- dof_names: A list of Prim names for each degree of freedom (DoF).
- dof_properties: A NumPy array containing the properties of the articulation DoFs, such as type, limits, drive mode, maximum velocity and effort, stiffness, and damping.
### Methods
- apply_action: Applies joint positions, velocities, and/or efforts to control the articulation.
- apply_visual_material: Applies a visual material (e.g., PreviewSurface, OmniPBR, OmniGlass) to the Prim and optionally its descendants.
- disable_gravity: Keeps gravity from affecting the robot.
- enable_gravity: Allows gravity to affect the robot.
- get_angular_velocity: Gets the angular velocity of the root articulation Prim.
- get_applied_action: Gets the last applied action (joint positions, velocities, and efforts).
- get_applied_joint_efforts: Gets the efforts applied to the joints set by the set_joint_efforts method.
- get_applied_visual_material: Returns the currently applied visual material, if supported.
- get_articulation_body_count: Gets the number of bodies (links) that make up the articulation.
- get_articulation_controller: Gets the articulation controller.
- get_default_state: Gets the default Prim states (spatial position and orientation).
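A short sketch of the interface described above (the prim path and joint targets are placeholders; it assumes the robot articulation already exists on the stage and the world has been reset):
```python
import numpy as np
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils.types import ArticulationAction

robot = Robot(prim_path="/World/Robot", name="my_robot")
robot.initialize()              # sets up the articulation handle
print(robot.dof_names)          # one entry per degree of freedom

# Drive every joint to a zero position target.
robot.apply_action(ArticulationAction(joint_positions=np.zeros(robot.num_dof)))
robot.disable_gravity()         # keep gravity from affecting the robot
```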
| 2,865 | Markdown | 76.459457 | 484 | 0.788482 |
KazWong/omniverse_sample/README.md | ov_sample: original sample from omniverse
script_window: sample for omniverse script editor
omnihelper: sample for python in command line
`isaac` is the bash script that runs the Isaac Sim Python interpreter; replace the path below with your path to isaac_sim-2021.1.1:
```
source /path/to/isaac_sim-2021.1.1/setup_python_env.sh
/path/to/isaac_sim-2021.1.1/python.sh $@
```
| 333 | Markdown | 26.833331 | 55 | 0.756757 |
KazWong/omniverse_sample/ov_sample/ros2_samples/navigation/carter_navigation/launch/carter_visualization.launch.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
## This launch file is only used for separately running Rviz2 with the carter_navigation configuration.
def generate_launch_description():
use_sim_time = LaunchConfiguration("use_sim_time", default="true")
rviz_config_dir = os.path.join(get_package_share_directory("carter_navigation"), "rviz2", "carter_navigation.rviz")
return LaunchDescription(
[
DeclareLaunchArgument("use_sim_time", default_value="True", description="Flag to enable use_sim_time"),
Node(
package="rviz2",
executable="rviz2",
name="rviz2",
output="screen",
parameters=[{"use_sim_time": use_sim_time}],
arguments=["-d", rviz_config_dir],
),
]
)
| 1,468 | Python | 38.702702 | 119 | 0.69891 |
KazWong/omniverse_sample/ov_sample/ros2_samples/navigation/carter_navigation/maps/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
resolution: 0.05
origin: [-10.325, -12.225, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 142 | YAML | 19.428569 | 38 | 0.739437 |
KazWong/omniverse_sample/ov_sample/python_samples/environment.yml | name: isaac-sim
channels:
- pytorch
- defaults
dependencies:
- cudatoolkit=11.0
- matplotlib=3.1.3
- pip=20.3.3
- python=3.7
- pytorch
- requests=2.23.0
- torchaudio=0.7.2
- torchvision=0.8.2
- six=1.12.0
- pip:
- gym==0.17.3
- opencv-python==4.4.0.44
- pillow==8.2.0
- scipy==1.5.4
- stable-baselines3==0.10.0
- tensorboard==2.4.0
- tensorboard-plugin-wit==1.7.0
- tensorflow-estimator==2.3.0
- tensorflow-gpu==2.3.1 | 499 | YAML | 19.833333 | 36 | 0.563126 |
KazWong/omniverse_sample/ov_sample/python_samples/README.md | # Python samples
This folder contains samples that are executed through the Omniverse Python loop.
## Built-in Python 3.7 environment
Navigate to the parent directory and execute:
```
./python.sh path/to/script.py
```
## Anaconda
### Prerequisites
- Anaconda
### Setup
Create and activate the conda environment
```
conda env create -f environment.yml
conda activate isaac-sim
```
Use the `setenv` script to add the Omniverse Kit Python environment to your active PYTHONPATH:
`source setenv.sh`
## How To Run
See the Isaac Sim documentation for how to run the samples in this folder
| 567 | Markdown | 16.212121 | 89 | 0.749559 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_train.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import numpy as np
import os
import carb
import signal
import json
import argparse
from argparse import Namespace
from omni.isaac.python_app import OmniKitHelper
from jetbot_model import CustomCNN
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback, CheckpointCallback
def train(args):
CUSTOM_CONFIG = {
"width": 224,
"height": 224,
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
omniverse_kit = OmniKitHelper(CUSTOM_CONFIG)
    # need to construct OmniKitHelper before importing physics, etc.
from jetbot_env import JetbotEnv
import omni.physx
# we disable all anti aliasing in the render because we want to train on the raw camera image.
omniverse_kit.set_setting("/rtx/post/aa/op", 0)
env = JetbotEnv(omniverse_kit, max_resets=args.rand_freq, updates_per_step=3, mirror_mode=args.mirror_mode)
checkpoint_callback = CheckpointCallback(
save_freq=args.save_freq, save_path="./params/", name_prefix=args.checkpoint_name
)
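    # Network architecture: two shared fully-connected layers (256, 256), then
    # separate policy (pi) and value-function (vf) heads of 128/64/32 units each.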
net_arch = [256, 256, dict(pi=[128, 64, 32], vf=[128, 64, 32])]
policy_kwargs = {"net_arch": net_arch, "activation_fn": torch.nn.ReLU}
if args.loaded_checkpoint == "":
model = PPO(
"CnnPolicy",
env,
verbose=1,
tensorboard_log=args.tensorboard_dir,
policy_kwargs=policy_kwargs,
device="cuda",
n_steps=args.step_freq,
batch_size=2048,
n_epochs=50,
learning_rate=0.0001,
)
else:
model = PPO.load(args.loaded_checkpoint, env)
model.learn(
total_timesteps=args.total_steps,
callback=checkpoint_callback,
eval_env=env,
eval_freq=args.eval_freq,
eval_log_path=args.evaluation_dir,
reset_num_timesteps=args.reset_num_timesteps,
)
model.save(args.checkpoint_name)
def runEval(args):
CUSTOM_CONFIG = {
"width": 224,
"height": 224,
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
# load a zip file to evaluate here
agent = PPO.load(args.evaluation_dir + "/best_model.zip", device="cuda")
omniverse_kit = OmniKitHelper(CUSTOM_CONFIG)
# need to construct OmniKitHelper before importing physics, etc
from jetbot_env import JetbotEnv
import omni.physx
# we disable all anti aliasing in the render because we want to train on the raw camera image.
omniverse_kit.set_setting("/rtx/post/aa/op", 0)
env = JetbotEnv(omniverse_kit, mirror_mode=args.mirror_mode)
obs = env.reset()
while True:
action = agent.predict(obs)
print(action)
obs, rew, done, infos = env.step(action[0])
if done:
obs = env.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--loaded_checkpoint", help="path to checkpoint to be loaded", default="", nargs="?", type=str)
parser.add_argument("-E", "--eval", help="evaluate checkpoint", action="store_true")
parser.add_argument(
"-R", "--reset_num_timesteps", help="reset the current timestep number (used in logging)", action="store_true"
)
parser.add_argument(
"-M", "--mirror_mode", help="reflect images and actions horizontally during training", action="store_true"
)
parser.add_argument("-H", "--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument(
"--checkpoint_name", help="name of checkpoint file (no suffix)", default="checkpoint_25k", type=str
)
parser.add_argument("--tensorboard_dir", help="path to tensorboard log directory", default="tensorboard", type=str)
parser.add_argument("--evaluation_dir", help="path to evaluation log directory", default="eval_log", type=str)
parser.add_argument("--save_freq", help="number of steps before saving a checkpoint", default=4096 * 8, type=int)
parser.add_argument("--eval_freq", help="number of steps before running an evaluation", default=4096 * 8, type=int)
parser.add_argument("--step_freq", help="number of steps before executing a PPO update", default=10240, type=int)
parser.add_argument(
"--rand_freq", help="number of environment resets before domain randomization", default=1, type=int
)
parser.add_argument(
"--total_steps",
help="the total number of steps before exiting and saving a final checkpoint",
default=250000000,
type=int,
)
parser.add_argument(
"--experimentFile", help="specify configuration via JSON. Overrides commandline", default="", type=str
)
args = parser.parse_args()
if args.experimentFile != "":
args_dict = vars(args)
if os.path.exists(args.experimentFile):
with open(args.experimentFile) as f:
json_args_dict = json.load(f)
args_dict.update(json_args_dict)
args = Namespace(**args_dict)
print("running with args: ", args)
def handle_exit(*args, **kwargs):
print("Exiting training...")
quit()
signal.signal(signal.SIGINT, handle_exit)
if args.eval:
runEval(args)
else:
train(args)
| 5,897 | Python | 31.766666 | 119 | 0.652535 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
from pxr import UsdGeom, Gf, UsdPhysics
import numpy as np
# Camera parameters
FOCAL_LENGTH = 0.75
HORIZONTAL_APERTURE = 2.350
VERTICAL_APERTURE = 2.350
# Drive Parameters
DRIVE_STIFFNESS = 10000.0
# The amount the camera points down at, decrease to raise camera angle
CAMERA_PIVOT = 40.0
class Jetbot:
def __init__(self, omni_kit):
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.usd_path = nucleus_server + "/Isaac/Robots/Jetbot/jetbot.usd"
self.robot_prim = None
self._dynamic_control = _dynamic_control
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.ar = None
# rotation is in degrees
def spawn(self, location, rotation):
stage = self.omni_kit.get_stage()
prefix = "/World/Robot/Jetbot"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
self.robot_prim = stage.DefinePrim(prim_path, "Xform")
self.robot_prim.GetReferences().AddReference(self.usd_path)
xform = UsdGeom.Xformable(self.robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(location)
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
xform_op.Set(mat)
self.camera_path = prim_path + "/chassis/rgb_camera/jetbot_camera"
self.camera_pivot = prim_path + "/chassis/rgb_camera"
# Set joint drive parameters
left_wheel_joint = UsdPhysics.DriveAPI.Apply(
stage.GetPrimAtPath(f"{prim_path}/chassis/left_wheel_joint"), "angular"
)
left_wheel_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
right_wheel_joint = UsdPhysics.DriveAPI.Apply(
stage.GetPrimAtPath(f"{prim_path}/chassis/right_wheel_joint"), "angular"
)
right_wheel_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
def teleport(self, location, rotation, settle=False):
if self.ar is None:
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
self.chassis = self.dc.get_articulation_root_body(self.ar)
self.dc.wake_up_articulation(self.ar)
rot_quat = Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation).GetQuaternion()
tf = self._dynamic_control.Transform(
location,
(rot_quat.GetImaginary()[0], rot_quat.GetImaginary()[1], rot_quat.GetImaginary()[2], rot_quat.GetReal()),
)
self.dc.set_rigid_body_pose(self.chassis, tf)
self.dc.set_rigid_body_linear_velocity(self.chassis, [0, 0, 0])
self.dc.set_rigid_body_angular_velocity(self.chassis, [0, 0, 0])
self.command((0, 0))
# Settle the robot onto the ground
if settle:
frame = 0
velocity = 1
while velocity > 0.1 and frame < 120:
self.omni_kit.update(1.0 / 60.0)
lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
velocity = np.linalg.norm([lin_vel.x, lin_vel.y, lin_vel.z])
frame = frame + 1
def activate_camera(self):
# Set camera parameters
stage = self.omni_kit.get_stage()
cameraPrim = UsdGeom.Camera(stage.GetPrimAtPath(self.camera_path))
cameraPrim.GetFocalLengthAttr().Set(FOCAL_LENGTH)
cameraPrim.GetHorizontalApertureAttr().Set(HORIZONTAL_APERTURE)
cameraPrim.GetVerticalApertureAttr().Set(VERTICAL_APERTURE)
# Point camera down at road
pivot_prim = stage.GetPrimAtPath(self.camera_pivot)
transform_attr = pivot_prim.GetAttribute("xformOp:transform")
transform_attr.Set(
transform_attr.Get().SetRotateOnly(Gf.Matrix3d(Gf.Rotation(Gf.Vec3d(0, 1, 0), CAMERA_PIVOT)))
)
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera_path))
def command(self, motor_value):
if self.ar is None:
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
self.chassis = self.dc.get_articulation_root_body(self.ar)
self.wheel_left = self.dc.find_articulation_dof(self.ar, "left_wheel_joint")
self.wheel_right = self.dc.find_articulation_dof(self.ar, "right_wheel_joint")
self.dc.wake_up_articulation(self.ar)
left_speed = self.wheel_speed_from_motor_value(motor_value[0])
right_speed = self.wheel_speed_from_motor_value(motor_value[1])
self.dc.set_dof_velocity_target(self.wheel_left, np.clip(left_speed, -10, 10))
self.dc.set_dof_velocity_target(self.wheel_right, np.clip(right_speed, -10, 10))
# idealized motor model
def wheel_speed_from_motor_value(self, input):
return input
def observations(self):
if self.ar is None:
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
self.chassis = self.dc.get_articulation_root_body(self.ar)
dc_pose = self.dc.get_rigid_body_pose(self.chassis)
dc_lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
dc_local_lin_vel = self.dc.get_rigid_body_local_linear_velocity(self.chassis)
dc_ang_vel = self.dc.get_rigid_body_angular_velocity(self.chassis)
return {
"pose": (dc_pose.p.x, dc_pose.p.y, dc_pose.p.z, dc_pose.r.w, dc_pose.r.x, dc_pose.r.y, dc_pose.r.z),
"linear_velocity": (dc_lin_vel.x, dc_lin_vel.y, dc_lin_vel.z),
"local_linear_velocity": (dc_local_lin_vel.x, dc_local_lin_vel.y, dc_local_lin_vel.z),
"angular_velocity": (dc_ang_vel.x, dc_ang_vel.y, dc_ang_vel.z),
}
| 6,442 | Python | 44.373239 | 117 | 0.652903 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_model.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import torch.nn.functional as F
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomCNN(BaseFeaturesExtractor):
"""
:param observation_space: (gym.Space)
:param features_dim: (int) Number of features extracted.
This corresponds to the number of unit for the last layer.
"""
def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512):
super(CustomCNN, self).__init__(observation_space, features_dim)
# We assume CxHxW images (channels first)
# Re-ordering will be done by pre-preprocessing or wrapper
n_input_channels = observation_space.shape[0]
# print(observation_space.shape)
self.cnn = nn.Sequential(
nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Flatten(),
)
# Compute shape by doing one forward pass
with torch.no_grad():
n_flatten = self.cnn(torch.as_tensor(observation_space.sample()[None]).float()).shape[1]
print("POST CONV FEATURES = ", n_flatten)
# define the hidden layer to translate to a fixed number of features
self.linear = nn.Sequential(nn.Linear(n_flatten, features_dim), nn.ReLU())
def forward(self, observations: torch.Tensor) -> torch.Tensor:
return self.linear(self.cnn(observations))
| 2,166 | Python | 37.696428 | 100 | 0.669898 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_env.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import numpy as np
import carb
from pxr import UsdGeom, Gf
import os
import time
import atexit
import asyncio
import random
import collections
import matplotlib.pyplot as plt
import omni
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.python_app import OmniKitHelper
from omni.isaac.synthetic_utils import SyntheticDataHelper
from jetbot import Jetbot
from road_environment import Environment
import gym
from gym import spaces
class JetbotEnv:
metadata = {"render.modes": ["human"]}
def __init__(
self, omni_kit, z_height=0, max_resets=10, updates_per_step=3, steps_per_rollout=1000, mirror_mode=False
):
self.MIRROR_MODE = mirror_mode
self.action_space = spaces.Box(low=0, high=2.0, shape=(2,), dtype=np.float32)
# IMPORTANT NOTE! SB3 wraps all image spaces in a transposer.
        # it assumes the image output is in standard (H, W, C) form
self.observation_space = spaces.Box(low=0, high=255, shape=(224, 224, 1), dtype=np.uint8)
self.noise = 0.05
# every time we update the stage, this is how much time will be simulated
self.dt = 1 / 30.0
self.omniverse_kit = omni_kit
self.sd_helper = SyntheticDataHelper()
self.roads = Environment(self.omniverse_kit)
# make environment z up
self.omniverse_kit.set_up_axis(UsdGeom.Tokens.z)
# we are going to train on a randomized loop that fits in a 2x2 tile area.
self.shape = [2, 2]
self.roads.generate_road(self.shape)
self.roads.generate_lights()
# spawn robot
self.jetbot = Jetbot(self.omniverse_kit)
self.initial_loc = self.roads.get_valid_location()
self.jetbot.spawn(Gf.Vec3d(self.initial_loc[0], self.initial_loc[1], 5), 0)
# switch kit camera to jetbot camera
self.jetbot.activate_camera()
# start simulation
self.omniverse_kit.play()
# Step simulation so that objects fall to rest
# wait until all materials are loaded
frame = 0
print("simulating physics...")
while frame < 60 or self.omniverse_kit.is_loading():
self.omniverse_kit.update(self.dt)
frame = frame + 1
print("done after frame: ", frame)
self.initialized = False
self.numsteps = 0
self.numresets = 0
self.maxresets = max_resets
self.updates_per_step = updates_per_step
self.steps_per_rollout = steps_per_rollout
self.hist_length = collections.deque([0.0] * 10, maxlen=10)
self.hist_forward_vel = collections.deque([0.0] * 10, maxlen=10)
self.hist_ang_vel = collections.deque([0.0] * 30, maxlen=30)
self.avg_forward_vel = 0
self.dist_traveled = 0
self.total_reward = 0
# Randomly mirror horizontally
self.update_mirror_mode()
def update_mirror_mode(self):
# Mirror if mode is enabled and we randomly sample True
self.mirror_mode = self.MIRROR_MODE & random.choice([False, True])
def calculate_reward(self):
# distance to nearest point on path in units of block. [0,1]
dist = self.roads.distance_to_path_in_tiles(self.current_pose)
self.dist = dist
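        # Gaussian falloff on lateral error: close to 1.0 on the lane centre and
        # near zero beyond roughly 0.3 tiles, so the reward below is the forward
        # speed scaled by how well the robot keeps to the path.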
dist_reward = np.exp(-dist ** 2 / 0.15 ** 2)
reward = self.current_forward_velocity * dist_reward
# if we are driving backwards, large negative reward
# if self.current_forward_velocity < 0:
# reward = self.current_forward_velocity
# THIS IS FOR DEBUGGING ONLY
if self.numsteps % 10 == 0 or reward < 0:
print(
"{:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}".format(
reward,
self.current_forward_velocity,
self.avg_forward_vel,
dist_reward,
self.dist_traveled,
self.current_ang_vel,
)
)
self.total_reward += reward
return reward
def is_dead(self):
done = False
# terminate if we leave boundary
if not self.roads.is_inside_path_boundary(self.current_pose):
print("dead not inside boundary", self.numsteps)
done = True
        # end the episode after steps_per_rollout steps
if self.numsteps > self.steps_per_rollout:
print("dead self.numsteps > self.steps_per_rollout", self.numsteps)
done = True
# terminate if we are driving backwards for too long
if self.avg_forward_vel <= 0 and self.numsteps > 35:
print("dead self.avg_forward_vel <= 1 after 35 steps ", self.avg_forward_vel)
done = True
return done
def transform_action(self, action):
# If mirrored, swap wheel controls
if self.mirror_mode:
action = action[::-1]
return action
def transform_state_image(self, im):
# If enabled, mirror image horizontally
if self.mirror_mode:
return np.flip(im, axis=1)
return im
def step(self, action):
if self.initialized:
self.previous_loc = self.current_loc
transformed_action = self.transform_action(action)
self.jetbot.command(transformed_action)
frame = 0
reward = 0
# every time step is called we actually update the scene by updates_per_step.
while frame < self.updates_per_step:
# render at 1/30, simulate at 1/60, which means 2 substeps per frame
self.omniverse_kit.update(self.dt, 1.0 / 60.0, 2.0)
frame = frame + 1
# compute reward once simulation is complete
obs = self.jetbot.observations()
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
self.current_ang_vel = obs["angular_velocity"][2]
self.current_loc = self.roads.get_tile_from_pose(self.current_pose)
self.hist_forward_vel.append(self.current_forward_velocity)
self.dist_traveled = self.dist_traveled + self.current_forward_velocity * self.dt
self.hist_ang_vel.append(self.current_ang_vel)
self.avg_forward_vel = sum(self.hist_forward_vel) / len(self.hist_forward_vel)
if not self.initialized:
self.previous_loc = self.roads.get_tile_from_pose(self.current_pose)
reward = self.calculate_reward()
# the synthetic data helper is our way of grabbing the image data we need from the camera. currently the SD helper
# only supports a single camera, however you can use it to access camera data as a cuda tensor directly on the
# device. stable baselines 3 is expecting a numpy array, so we pull the data to the host
# additional sensors that could be of interest and can be added to this list:
# "depth", "instanceSegmentation", "semanticSegmentation"
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
# we only need the rgb channels of the rgb image
currentState = gt["rgb"][:, :, :3].astype(np.float)
currentState = self.transform_state_image(currentState)
if not self.initialized:
self.previousState = currentState
img = np.dot(currentState, [0.299, 0.587, 0.114]) # np.concatenate((currentState, self.previousState), axis=2)
img = img.reshape((img.shape[0], img.shape[1], 1))
        # the real camera will have noise on each pixel, so we add some Gaussian noise here to simulate that
        # (comment out the line below to train on noise-free images)
img = np.clip((255 * self.noise * np.random.randn(224, 224, 1) + img.astype(np.float)), 0, 255).astype(np.uint8)
self.previousState = currentState
self.numsteps += 1
done = self.is_dead()
return img, reward, done, {}
def reset(self):
# Randomly mirror horizontally
self.update_mirror_mode()
# randomize the road configuration every self.maxresets resets.
if self.numresets % self.maxresets == 0:
size = random.randrange(2, 6)
self.shape = [size, size]
self.roads.reset(self.shape)
if not self.initialized:
            state, reward, done, info = self.step([0, 0])
self.initialized = True
# every time we reset, we move the robot to a random location, and pointing along the direction of the road
loc = self.roads.get_valid_location()
# the random angle offset can be increased here
rot = self.roads.get_forward_direction(loc) + random.uniform(-10, 10)
self.jetbot.teleport(
Gf.Vec3d(loc[0] + random.uniform(-2.5, 2.5), loc[1] + random.uniform(-2.5, 2.5), 5), rot, settle=True
)
obs = self.jetbot.observations()
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
self.current_loc = self.roads.get_tile_from_pose(self.current_pose)
self.previous_loc = self.roads.get_tile_from_pose(self.current_pose)
self.dist = self.roads.distance_to_path_in_tiles(self.current_pose)
# wait for loading
if self.numresets % self.maxresets == 0:
while self.omniverse_kit.is_loading():
self.omniverse_kit.update(self.dt)
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
currentState = gt["rgb"][:, :, :3]
currentState = self.transform_state_image(currentState)
img = np.dot(
currentState.astype(np.float), [0.299, 0.587, 0.114]
) # np.concatenate((currentState, currentState), axis=2)
img = img.reshape((img.shape[0], img.shape[1], 1))
        # add the same per-pixel noise used in step() (comment out the line below to disable)
img = np.clip((255 * self.noise * np.random.randn(224, 224, 1) + img.astype(np.float)), 0, 255).astype(np.uint8)
print(
"reset ",
sum(self.hist_length) / len(self.hist_length),
self.numresets,
self.dist_traveled,
self.avg_forward_vel,
self.total_reward,
)
self.numsteps = 0
self.previousState = currentState
self.numresets += 1
self.total_reward = 0
self.dist_traveled = 0
return img
| 11,046 | Python | 37.093103 | 123 | 0.62457 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/road_environment.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
import numpy as np
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from jetbot_city.road_map import *
from jetbot_city.road_map_path_helper import *
from jetbot_city.road_map_generator import *
from omni.isaac.synthetic_utils import DomainRandomization
import math
class Environment:
def __init__(self, omni_kit, z_height=0):
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error(
"Could not find nucleus server with /Isaac folder. Please specify the correct nucleus server in apps/omni.isaac.sim.python.kit"
)
return
result, nucleus_server = find_nucleus_server("/Library/Props/Road_Tiles/Parts/")
if result is False:
carb.log_error(
"Could not find nucleus server with /Library/Props/Road_Tiles/Parts/ folder. Please refer to the documentation to aquire the road tile assets"
)
return
# 1=I 2=L 3=T, 4=X
self.tile_usd = {
0: None,
1: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4336p01.usd", "offset": 180},
2: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4342p01.usd", "offset": 180},
3: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4341p01.usd", "offset": 180},
4: {"asset": nucleus_server + "/Library/Props/Road_Tiles/Parts/p4343p01.usd", "offset": 180},
} # list of tiles that can be spawned
self.texture_list = [
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/marble_tile.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_a.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_b.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/textured_wall.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered_color.png",
]
self.tile_size = [25.0, 25.0]
        # 1=UP, 2=DOWN, 3=LEFT, 4=RIGHT
self.direction_map = {1: 180, 2: 0, 3: -90, 4: 90}
self.prims = [] # list of spawned tiles
self.height = z_height # height of the ground tiles
self.tiles = None
self.state = None
# because the ground plane is what the robot drives on, we only do this once. We can then re-generate the road as often as we need without impacting physics
self.setup_physics()
self.road_map = None
self.road_path_helper = None
self.map_generator = LoopRoadMapGenerator()
contents = omni.client.list(nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/")[1]
for entry in contents:
self.texture_list.append(
nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/" + entry.relative_path
)
contents = omni.client.list(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/")[1]
names = []
loaded_paths = []
for entry in contents:
if not entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
names.append(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/" + entry.relative_path)
loaded_paths.append("/DR/mesh_component/mesh_" + entry.relative_path[0:-4])
print(loaded_paths)
self.omni_kit.create_prim("/World/Floor", "Xform")
stage = omni.usd.get_context().get_stage()
cubeGeom = UsdGeom.Cube.Define(stage, "/World/Floor/thefloor")
cubeGeom.CreateSizeAttr(300)
offset = Gf.Vec3f(75, 75, -150.1)
cubeGeom.AddTranslateOp().Set(offset)
# Create a sphere room so the world is not black
self.omni_kit.create_prim("/World/Room", "Sphere", attributes={"radius": 1e3})
prims = []
self.dr = DomainRandomization()
self.dr.toggle_manual_mode()
self.dr.create_mesh_comp(prim_paths=prims, mesh_list=names, mesh_range=[1, 1])
self.omni_kit.update(1 / 60.0)
print("waiting for materials to load...")
while self.omni_kit.is_loading():
self.omni_kit.update(1 / 60.0)
lights = []
for i in range(5):
prim_path = "/World/Lights/light_" + str(i)
self.omni_kit.create_prim(
prim_path,
"SphereLight",
translation=(0, 0, 200),
rotation=(0, 0, 0),
attributes={"radius": 10, "intensity": 1000.0, "color": (1.0, 1.0, 1.0)},
)
lights.append(prim_path)
frames = 1
# enable randomization for environment
self.dr.create_movement_comp(prim_paths=loaded_paths, min_range=(0, 0, 15), max_range=(150, 150, 15))
self.dr.create_rotation_comp(prim_paths=loaded_paths)
self.dr.create_visibility_comp(prim_paths=loaded_paths, num_visible_range=(15, 15))
self.dr.create_light_comp(light_paths=lights)
self.dr.create_movement_comp(prim_paths=lights, min_range=(0, 0, 30), max_range=(150, 150, 30))
self.dr.create_texture_comp(
prim_paths=["/World/Floor"], enable_project_uvw=True, texture_list=self.texture_list
)
self.dr.create_color_comp(prim_paths=["/World/Room"])
def generate_lights(self):
prim_path = omni.usd.get_stage_next_free_path(self.omni_kit.get_stage(), "/World/Env/Light", False)
self.prims.append(prim_path)
self.omni_kit.create_prim(
prim_path,
"RectLight",
translation=(75, 75, 100),
rotation=(0, 0, 0),
attributes={"height": 150, "width": 150, "intensity": 2000.0, "color": (1.0, 1.0, 1.0)},
)
def reset(self, shape):
# print(self.prims)
# cmd = omni.kit.builtin.init.DeletePrimsCommand(self.prims)
# cmd.do()
stage = omni.usd.get_context().get_stage()
for layer in stage.GetLayerStack():
edit = Sdf.BatchNamespaceEdit()
for path in self.prims:
prim_spec = layer.GetPrimAtPath(path)
if prim_spec is None:
continue
parent_spec = prim_spec.realNameParent
if parent_spec is not None:
edit.Add(path, Sdf.Path.emptyPath)
layer.Apply(edit)
self.prims = []
self.generate_road(shape)
self.dr.randomize_once()
def generate_road(self, shape):
self.tiles, self.state, self.road_map = self.map_generator.generate(shape)
tiles = self.tiles
state = self.state
self.road_path_helper = RoadMapPathHelper(self.road_map)
if tiles.shape != state.shape:
print("tiles and state sizes don't match")
return
stage = self.omni_kit.get_stage()
rows, cols = tiles.shape
self.valid_tiles = []
for x in range(0, rows):
for y in range(0, cols):
if tiles[x, y] != 0:
pos_x = x * self.tile_size[0] + 12.5
pos_y = y * self.tile_size[1] + 12.5
self.create_tile(
stage,
self.tile_usd[tiles[x, y]]["asset"],
Gf.Vec3d(pos_x, pos_y, self.height),
self.direction_map[state[x, y]] + self.tile_usd[tiles[x, y]]["offset"],
)
for x in range(0, rows):
for y in range(0, cols):
# print(paths[x,y])
if tiles[x, y] != 0:
self.valid_tiles.append([x, y])
def generate_road_from_numpy(self, tiles, state):
self.tiles = tiles
self.state = state
self.road_map = RoadMap.create_from_numpy(self.tiles, self.state)
self.road_path_helper = RoadMapPathHelper(self.road_map)
if tiles.shape != state.shape:
print("tiles and state sizes don't match")
return
stage = self.omni_kit.get_stage()
rows, cols = tiles.shape
self.valid_tiles = []
for x in range(0, rows):
for y in range(0, cols):
if tiles[x, y] != 0:
pos_x = x * self.tile_size[0] + 12.5
pos_y = y * self.tile_size[1] + 12.5
self.create_tile(
stage,
self.tile_usd[tiles[x, y]]["asset"],
Gf.Vec3d(pos_x, pos_y, self.height),
self.direction_map[state[x, y]] + self.tile_usd[tiles[x, y]]["offset"],
)
for x in range(0, rows):
for y in range(0, cols):
# print(paths[x,y])
if tiles[x, y] != 0:
self.valid_tiles.append([x, y])
def create_tile(self, stage, path, location, rotation):
prefix = "/World/Env/Tiles/Tile"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
self.prims.append(prim_path)
tile_prim = stage.DefinePrim(prim_path, "Xform")
tile_prim.GetReferences().AddReference(path)
xform = UsdGeom.Xformable(tile_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(location)
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
xform_op.Set(mat)
def setup_physics(self):
from pxr import PhysxSchema, PhysicsSchemaTools
stage = self.omni_kit.get_stage()
# Add physics scene
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/World/Env/PhysicsScene"))
# Set gravity vector
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/World/Env/PhysicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/World/Env/PhysicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Create physics plane for the ground
PhysicsSchemaTools.addGroundPlane(
stage, "/World/Env/GroundPlane", "Z", 100.0, Gf.Vec3f(0, 0, self.height), Gf.Vec3f(1.0)
)
# Hide the visual geometry
imageable = UsdGeom.Imageable(stage.GetPrimAtPath("/World/Env/GroundPlane/geom"))
if imageable:
imageable.MakeInvisible()
def get_valid_location(self):
if self.tiles is None:
print("cannot provide valid location until road is generated")
return (0, 0)
i = np.random.choice(len(self.valid_tiles), 1)[0]
dist, point = self.road_path_helper.distance_to_path(self.valid_tiles[i])
x, y = point
print("get valid location called", self.valid_tiles[i], point)
return (x * self.tile_size[0], y * self.tile_size[1])
# Computes an approximate forward vector based on the current spawn point and nearby valid path point
def get_forward_direction(self, loc):
if self.road_path_helper is not None:
k = 100
dists, pts = self.road_path_helper.get_k_nearest_path_points(np.array([self.get_tile_from_pose(loc)]), k)
pointa = pts[0][0]
pointb = pts[0][k - 1]
if random.choice([False, True]):
pointa, pointb = pointb, pointa
return math.degrees(math.atan2(pointb[1] - pointa[1], pointb[0] - pointa[0]))
# Compute the x,y tile location from the robot pose
def get_tile_from_pose(self, pose):
return (pose[0] / self.tile_size[0], pose[1] / self.tile_size[1])
def distance_to_path(self, robot_pose):
if self.road_path_helper is not None:
distance, point = self.road_path_helper.distance_to_path(self.get_tile_from_pose(robot_pose))
return distance * self.tile_size[0]
def distance_to_path_in_tiles(self, robot_pose):
if self.road_path_helper is not None:
distance, point = self.road_path_helper.distance_to_path(self.get_tile_from_pose(robot_pose))
return distance
def distance_to_boundary(self, robot_pose):
if self.road_path_helper is not None:
distance = self.road_path_helper.distance_to_boundary(self.get_tile_from_pose(robot_pose))
return distance * self.tile_size[0]
def is_inside_path_boundary(self, robot_pose):
if self.road_path_helper is not None:
return self.road_path_helper.is_inside_path_boundary(self.get_tile_from_pose(robot_pose))
| 13,501 | Python | 42.13738 | 164 | 0.595289 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/road_map.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import enum
import random
from collections import deque
import numpy as np
import os
import io
import cv2
import PIL.Image
import pickle
from typing import List, Set, Dict, Tuple, Optional
from .priority_queue import *
DEFAULT_IMAGE_SIZE = (32, 32)
def mask_L(size=256, thickness=1):
mask = np.zeros((size, size), dtype=np.uint8)
cv2.circle(mask, (size, 0), size // 2, (255, 255, 255), thickness)
return PIL.Image.fromarray(mask)
def mask_I(size=256, thickness=1):
mask = np.zeros((size, size), dtype=np.uint8)
cv2.line(mask, (size // 2, 0), (size // 2, size), (255, 255, 255), thickness, cv2.LINE_4)
return PIL.Image.fromarray(mask)
def mask_T(size=256, thickness=1):
mask = np.zeros((size, size), dtype=np.uint8)
mask = np.maximum(mask, cv2.circle(mask, (0, size), size // 2, (255, 255, 255), thickness))
mask = np.maximum(mask, cv2.circle(mask, (size, size), size // 2, (255, 255, 255), thickness))
mask = np.maximum(mask, cv2.line(mask, (0, size // 2), (size, size // 2), (255, 255, 255), thickness, cv2.LINE_4))
return PIL.Image.fromarray(mask)
def mask_X(size=256, thickness=1):
mask = mask_L(size, thickness)
mask = np.maximum(mask, mask_I(size, thickness))
for i in range(4):
mask = np.maximum(mask, cv2.rotate(mask, cv2.ROTATE_90_CLOCKWISE))
return PIL.Image.fromarray(mask)
_I_IMAGE = mask_I()
_L_IMAGE = mask_L()
_T_IMAGE = mask_T()
_X_IMAGE = mask_X()
class RoadBlockType(enum.IntEnum):
EMPTY = 0
I = 1
L = 2
T = 3
X = 4
def ports(self):
if self == RoadBlockType.I:
return [0, 1, 0, 1] # left, top, right, bottom
elif self == RoadBlockType.L:
return [0, 1, 1, 0]
elif self == RoadBlockType.T:
return [1, 0, 1, 1]
elif self == RoadBlockType.X:
return [1, 1, 1, 1]
else:
return [0, 0, 0, 0]
def image(self, size=DEFAULT_IMAGE_SIZE):
if self == RoadBlockType.I:
return _I_IMAGE.resize(size)
elif self == RoadBlockType.L:
return _L_IMAGE.resize(size)
elif self == RoadBlockType.T:
return _T_IMAGE.resize(size)
elif self == RoadBlockType.X:
return _X_IMAGE.resize(size)
else:
return PIL.Image.fromarray(np.zeros(size + (3,), dtype=np.uint8))
def paths_mask(self, size=DEFAULT_IMAGE_SIZE, thickness=1):
if self == RoadBlockType.I:
return mask_I(size[0], thickness)
elif self == RoadBlockType.L:
return mask_L(size[0], thickness)
elif self == RoadBlockType.T:
return mask_T(size[0], thickness)
elif self == RoadBlockType.X:
return mask_X(size[0], thickness)
else:
return PIL.Image.fromarray(np.zeros(size, dtype=np.uint8))
class RoadBlockState(enum.IntEnum):
HIDDEN = 0
UP = 1 # 0
DOWN = 2 # 180
LEFT = 3 # CCW 90
RIGHT = 4 # CW 90
@staticmethod
def random():
return RoadBlockState(np.random.randint(len(RoadBlockState)))
class RoadBlock(object):
def __init__(self, type: RoadBlockType, state: RoadBlockState):
self.type = type
self.state = state
def __iter__(self):
yield self.type
yield self.state
def ports(self):
if self.state == RoadBlockState.HIDDEN:
return [0, 0, 0, 0]
elif self.state == RoadBlockState.UP:
return self.type.ports()
elif self.state == RoadBlockState.DOWN:
return list(np.roll(self.type.ports(), 2))
elif self.state == RoadBlockState.LEFT:
return list(np.roll(self.type.ports(), -1))
else:
return list(np.roll(self.type.ports(), 1))
def has_left_port(self):
return self.ports()[0]
def has_right_port(self):
return self.ports()[2]
def has_top_port(self):
return self.ports()[1]
def has_bottom_port(self):
return self.ports()[3]
def image(self, size=DEFAULT_IMAGE_SIZE):
# if self.state == RoadBlockState.HIDDEN or self.type == RoadBlockType.EMPTY:
# return PIL.Image.fromarray(np.zeros(size + (3,), dtype=np.uint8))
image = self.type.image(size=size)
if self.state == RoadBlockState.LEFT:
image = image.rotate(90)
elif self.state == RoadBlockState.RIGHT:
image = image.rotate(-90)
elif self.state == RoadBlockState.DOWN:
image = image.rotate(180)
return image
def paths_mask(self, size=DEFAULT_IMAGE_SIZE, thickness=1):
# if self.state == RoadBlockState.HIDDEN or self.type == RoadBlockType.EMPTY:
# return PIL.Image.fromarray(np.zeros(size, dtype=np.uint8))
image = self.type.paths_mask(size=size, thickness=thickness)
if self.state == RoadBlockState.LEFT:
image = image.rotate(90)
elif self.state == RoadBlockState.RIGHT:
image = image.rotate(-90)
elif self.state == RoadBlockState.DOWN:
image = image.rotate(180)
return image
def l1_distance(a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
class RoadLocation(object):
def __init__(self, i, j):
self.i = i
self.j = j
def __iter__(self):
yield self.i
yield self.j
class RoadMap(object):
def __init__(self, grid: List[List[RoadBlock]]):
self.grid = grid
@staticmethod
def create_random_from_types(types: List[RoadBlockType], NI, NJ):
grid = []
for i in range(NI):
row = []
for j in range(NJ):
row.append(RoadBlock(RoadBlockType.EMPTY, RoadBlockState.random()))
grid.append(row)
# construct positions
locations = []
for i in range(NI):
for j in range(NJ):
locations.append(RoadLocation(i, j))
np.random.shuffle(locations)
locations = locations[0 : len(types)]
for i, loc in enumerate(locations):
grid[loc.i][loc.j] = RoadBlock(types[i], RoadBlockState.random())
return RoadMap(grid)
@staticmethod
def create_from_numpy(types, states):
grid = []
for i in range(types.shape[0]):
row = []
for j in range(types.shape[1]):
row.append(RoadBlock(RoadBlockType(types[i, j]), RoadBlockState(states[i, j])))
grid.append(row)
return RoadMap(grid)
@property
def NI(self):
return len(self.grid)
@property
def NJ(self):
return len(self.grid[0])
def numpy(self):
types = []
states = []
for i in range(self.NI):
types_i = []
states_i = []
for j in range(self.NJ):
types_i.append(int(self.grid[i][j].type))
states_i.append(int(self.grid[i][j].state))
types.append(types_i)
states.append(states_i)
return np.array(types), np.array(states)
def _children(self, i, j):
block = self.grid[i][j]
children = []
if i > 0:
top = self.grid[i - 1][j]
if top.has_bottom_port() and block.has_top_port():
children.append((i - 1, j))
if i < self.NI - 1:
bottom = self.grid[i + 1][j]
if bottom.has_top_port() and block.has_bottom_port():
children.append((i + 1, j))
if j > 0:
left = self.grid[i][j - 1]
if left.has_right_port() and block.has_left_port():
children.append((i, j - 1))
if j < self.NJ - 1:
right = self.grid[i][j + 1]
if right.has_left_port() and block.has_right_port():
children.append((i, j + 1))
return children
def _search_path(self, i, j, visited):
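        # Breadth-first flood fill from (i, j): collect every block reachable
        # through matching ports, marking blocks as visited along the way.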
q = deque()
q.append((i, j))
path = []
while q:
i, j = q.popleft()
path.append((i, j))
for child in self._children(i, j):
if not visited[child[0], child[1]]:
q.append(child)
visited[child[0], child[1]] = True
return path
def find_shortest_path(self, a, b):
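        # A*-style search over connected blocks: the priority is the path length
        # so far plus the L1 distance to the goal, so the first complete path
        # popped at b is a shortest one.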
visited = np.zeros((self.NI, self.NJ), dtype=np.bool)
q = PriorityQueue()
q.push((l1_distance(a, b), [a]))
visited[a[0], a[1]] = 1
while not q.empty():
cost, path = q.pop()
tail = path[-1]
if tail[0] == b[0] and tail[1] == b[1]:
return path
for child in self._children(tail[0], tail[1]):
if not visited[child[0], child[1]]:
child_path = path + [child]
child_cost = len(child_path) + l1_distance(child, b)
q.push((child_cost, child_path))
visited[child[0], child[1]] = 1
return None
def paths(self):
visited = np.zeros((self.NI, self.NJ), dtype=np.bool)
# set blocks that cannot be path components as visited
for i in range(self.NI):
for j in range(self.NJ):
block = self.grid[i][j]
if block.state == RoadBlockState.HIDDEN or block.type == RoadBlockType.EMPTY:
visited[i, j] = True
paths = []
for i in range(self.NI):
for j in range(self.NJ):
if not visited[i, j]:
visited[i, j] = True
path = self._search_path(i, j, visited)
paths.append(path)
return paths
def num_open_ports(self):
num_open = 0
for i in range(self.NJ):
for j in range(self.NI):
num_open += np.count_nonzero(self.grid[i][j].ports()) - len(self._children(i, j))
return num_open
def num_ports(self):
num_ports = 0
for i in range(self.NJ):
for j in range(self.NI):
num_ports += np.count_nonzero(self.grid[i][j].ports()) # - len(self._children(i, j))
return num_ports
def num_closed_ports(self):
num_ports = 0
for i in range(self.NJ):
for j in range(self.NI):
num_ports += len(self._children(i, j))
return num_ports
def image(self, block_size=DEFAULT_IMAGE_SIZE):
si = block_size[0]
sj = block_size[1]
image = np.zeros((si * self.NI, sj * self.NJ, 3), dtype=np.uint8)
for i in range(self.NJ):
for j in range(self.NI):
image[i * si : i * si + si, j * sj : j * sj + sj] = np.array(self.grid[i][j].image(size=block_size))
return PIL.Image.fromarray(image)
def paths_mask(self, block_size=DEFAULT_IMAGE_SIZE, thickness=1):
si = block_size[0]
sj = block_size[1]
image = np.zeros((si * self.NI, sj * self.NJ), dtype=np.uint8)
for i in range(self.NJ):
for j in range(self.NI):
image[i * si : i * si + si, j * sj : j * sj + sj] = np.array(
self.grid[i][j].paths_mask(size=block_size, thickness=thickness)
)
return PIL.Image.fromarray(image)
def obs(self):
obs = np.zeros((4, self.NI, self.NJ), dtype=np.float32)
for i in range(self.NI):
for j in range(self.NJ):
obs[0, i, j] = self.grid[i][j].has_left_port()
obs[1, i, j] = self.grid[i][j].has_top_port()
obs[2, i, j] = self.grid[i][j].has_right_port()
obs[3, i, j] = self.grid[i][j].has_bottom_port()
return obs
def swap_(self, a, b):
tmp = self.grid[a[0]][a[1]]
self.grid[a[0]][a[1]] = self.grid[b[0]][b[1]]
self.grid[b[0]][b[1]] = tmp
def render(self, widget):
# Render the environment to the screen
imgByteArr = io.BytesIO()
self.image(block_size=(64, 64)).save(imgByteArr, format="PNG")
imgByteArr = imgByteArr.getvalue()
widget.value = imgByteArr
def save(self, f):
types, states = self.numpy()
data = {"types": types, "states": states}
if isinstance(f, str):
with open(f, "wb") as f:
pickle.dump(data, f)
else:
pickle.dump(data, f)
@staticmethod
def load(f):
if isinstance(f, str):
with open(f, "rb") as f:
data = pickle.load(f)
else:
data = pickle.load(f)
return RoadMap.create_from_numpy(data["types"], data["states"])
def ports(self):
ports = np.zeros((self.NI, self.NJ, 4), np.bool)
for i in range(self.NI):
for j in range(self.NJ):
ports[i, j, 0] = self.grid[i][j].has_left_port()
ports[i, j, 1] = self.grid[i][j].has_top_port()
ports[i, j, 2] = self.grid[i][j].has_right_port()
ports[i, j, 3] = self.grid[i][j].has_bottom_port()
return ports
@staticmethod
def create_from_ports(ports):
NI = ports.shape[0]
NJ = ports.shape[1]
types = np.zeros(ports.shape[0:2], dtype=np.int64)
states = np.zeros(ports.shape[0:2], dtype=np.int64)
for i in range(NI):
for j in range(NJ):
pij = ports[i, j]
for typ in RoadBlockType:
if (np.roll(typ.ports(), 0) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.UP
break
elif (np.roll(typ.ports(), 1) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.RIGHT
break
elif (np.roll(typ.ports(), 2) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.DOWN
break
elif (np.roll(typ.ports(), 3) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.LEFT
break
return RoadMap.create_from_numpy(types, states)
| 14,731 | Python | 31.449339 | 118 | 0.530242 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/road_map_generator.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .road_map import *
from .priority_queue import *
def children(occupancy, point):
NI = occupancy.shape[0]
NJ = occupancy.shape[1]
children = []
if point[0] > 0:
pt = [point[0] - 1, point[1]]
children.append(pt)
if point[0] < NI - 1:
pt = [point[0] + 1, point[1]]
children.append(pt)
if point[1] > 0:
pt = [point[0], point[1] - 1]
children.append(pt)
if point[1] < NJ - 1:
pt = [point[0], point[1] + 1]
children.append(pt)
return children
def l1_distance(a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
def find_path(occupancy, point_a, point_b):
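    # Best-first (A*-style) search from point_a to point_b over unoccupied cells,
    # using L1 distance as the heuristic. Returns the list of intermediate cells
    # between the two endpoints (excluding both), or None if no route exists.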
visited = np.copy(occupancy)
    visited[point_a[0], point_a[1]] = 1  # mark the start cell as visited
q = PriorityQueue() # cost heuristic, path...
for child in children(visited, point_a):
if not visited[child[0], child[1]]:
q.push((1 + l1_distance(child, point_b), [child]))
visited[child[0], child[1]] = 1
while not q.empty():
cost, path = q.pop()
tail = path[-1]
for child in children(visited, tail):
if child[0] == point_b[0] and child[1] == point_b[1]:
return path
elif not visited[child[0], child[1]]:
child_cost = len(path) + l1_distance(child, point_b)
child_path = path + [child]
q.push((child_cost, child_path))
visited[child[0], child[1]] = 1
return None
def add_port(ports, a, b):
# port order: left,top,right,bottom
if b[1] > a[1]:
# b to right of a
ports[a[0], a[1], 2] = 1
ports[b[0], b[1], 0] = 1
elif b[1] < a[1]:
# b to left of a
ports[a[0], a[1], 0] = 1
ports[b[0], b[1], 2] = 1
elif b[0] > a[0]:
        # b below a
ports[a[0], a[1], 3] = 1
ports[b[0], b[1], 1] = 1
elif b[0] < a[0]:
        # b above a
ports[a[0], a[1], 1] = 1
ports[b[0], b[1], 3] = 1
def ports_to_types_states(ports):
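    # Recover a tile type and orientation for every cell by matching its port
    # pattern against each RoadBlockType rotated through the four orientations
    # (np.roll shifts the [left, top, right, bottom] port list).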
NI = ports.shape[0]
NJ = ports.shape[1]
types = np.zeros(ports.shape[0:2], dtype=np.int64)
states = np.zeros(ports.shape[0:2], dtype=np.int64)
for i in range(NI):
for j in range(NJ):
pij = ports[i, j]
for typ in RoadBlockType:
if (np.roll(typ.ports(), 0) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.UP
break
elif (np.roll(typ.ports(), 1) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.RIGHT
break
elif (np.roll(typ.ports(), 2) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.DOWN
break
elif (np.roll(typ.ports(), 3) == pij).all():
types[i, j] = typ
states[i, j] = RoadBlockState.LEFT
break
return types, states
class RoadMapGenerator(object):
def generate(self, shape):
raise NotImplementedError
class LoopRoadMapGenerator(RoadMapGenerator):
def generate(self, shape):
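        # Build a closed loop: random-walk from a random start cell, only stepping
        # onto cells that still admit a path back to the start, then close the loop
        # with find_path and convert the resulting port connections into tile
        # types and orientations.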
GRID_SIZE = shape
ports = np.zeros((GRID_SIZE[0], GRID_SIZE[1], 4), np.bool)
occupancy = np.zeros(GRID_SIZE, np.uint8)
start = (np.random.randint(GRID_SIZE[0]), np.random.randint(GRID_SIZE[1]))
path = []
path.append(start)
occupancy[start[0], start[1]] = 1
runner = start
while True:
# get valid children
valid_children = []
for child in children(occupancy, runner):
if not occupancy[child[0], child[1]]:
child_occupancy = np.copy(occupancy)
child_occupancy[child[0], child[1]] = 1
child_path = find_path(child_occupancy, child, start)
if child_path is not None:
valid_children.append(child)
# exit if no valid child paths
if len(valid_children) == 0:
break
# navigate to random child
idx = np.random.randint(len(valid_children))
runner = valid_children[idx]
path.append(runner)
occupancy[runner[0], runner[1]] = 1
path = path + find_path(occupancy, runner, start) + [start]
for i in range(len(path) - 1):
add_port(ports, path[i], path[i + 1])
types, states = ports_to_types_states(ports)
road_map = RoadMap.create_from_numpy(types, states)
return types, states, road_map
| 5,124 | Python | 29.505952 | 82 | 0.523224 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/priority_queue.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
class PriorityQueue(object):
def __init__(self):
self.items = []
def push(self, item):
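        # Insert `item` so the list stays sorted by its first element (the cost)
        # using a binary search; pop() then simply removes the cheapest item from
        # the front of the list.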
a = 0
b = len(self.items) - 1
while a <= b:
c = a + (b - a) // 2
            if self.items[c][0] < item[0]:
a = c + 1
elif self.items[c][0] > item[0]:
b = c - 1
else:
break
if a >= len(self.items):
idx = len(self.items)
elif b < 0:
idx = 0
else:
idx = a + (b - a) // 2
self.items.insert(idx, item)
def pop(self):
return self.items.pop(0)
def empty(self):
return len(self.items) == 0
| 1,137 | Python | 27.449999 | 76 | 0.550572 |
KazWong/omniverse_sample/ov_sample/python_samples/jetbot/jetbot_city/road_map_path_helper.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .road_map import *
from scipy.spatial import KDTree
import cv2
import matplotlib.pyplot as plt
import numpy as np
class RoadMapPathHelper(object):
def __init__(self, road_map, block_resolution=128, path_thickness_ratio=19 / 32):
self._road_map = road_map
self._block_resolution = block_resolution
self._map_path_mask = np.array(self._road_map.paths_mask((block_resolution, block_resolution), thickness=1))
self._map_boundary_mask = np.array(
self._road_map.paths_mask(
(block_resolution, block_resolution), thickness=int(block_resolution * path_thickness_ratio)
)
)
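        # Build KD-trees in tile coordinates: one over the thin centre-line mask
        # for distance-to-path queries, and one over the edge of the thick mask
        # (extracted with a Laplacian filter) for distance-to-boundary queries.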
mask_pts = np.transpose(np.nonzero(self._map_path_mask))
mask_pts = mask_pts / block_resolution # get points in grid coordinates
self._path_kdtree = KDTree(mask_pts)
boundary_points = np.transpose(np.nonzero(cv2.Laplacian(self._map_boundary_mask, cv2.CV_32F)))
boundary_points = boundary_points / block_resolution
self._boundary_kdtree = KDTree(boundary_points)
# print("boundary points shape! ", boundary_points.shape)
# plt.imshow(self._map_boundary_mask)
# plt.show()
def get_k_nearest_path_points(self, points, k=1):
dists, indices = self._path_kdtree.query(points, k=k)
return dists, self._path_kdtree.data[indices]
def distance_to_path(self, point):
dists, pts = self.get_k_nearest_path_points(np.array([point]))
return (float(dists[0]), pts[0])
def get_k_nearest_boundary_points(self, points, k=1):
dists, indices = self._boundary_kdtree.query(points, k=k)
return dists, self._boundary_kdtree.data[indices]
def distance_to_boundary(self, point):
dists, pts = self.get_k_nearest_boundary_points(np.array([point]))
return float(dists[0])
def is_inside_path_boundary(self, point):
return (
self._map_boundary_mask[int(point[0] * self._block_resolution), int(point[1] * self._block_resolution)] > 0
)
| 2,485 | Python | 41.862068 | 119 | 0.675654 |
KazWong/omniverse_sample/ov_sample/python_samples/ros/clock.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Example ROS bridge sample showing rospy and rosclock interaction
kit = OmniKitHelper(config=CONFIG)
import omni
# enable ROS bridge extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
# check if rosmaster node is running
    # this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
kit.update()
result, check = omni.kit.commands.execute("RosBridgeRosMasterCheck")
if not check:
carb.log_error("Please run roscore before executing this script")
kit.stop()
kit.shutdown()
exit()
# Note that this is not the system level rospy, but one compiled for omniverse
from rosgraph_msgs.msg import Clock
import rospy
# create a clock using sim time
result, prim = omni.kit.commands.execute(
"ROSBridgeCreateClock", path="/ROS_Clock_Sim", clock_topic="/sim_time", sim_time=True
)
# create a clock using system time
result, prim = omni.kit.commands.execute(
"ROSBridgeCreateClock", path="/ROS_Clock_System", clock_topic="/system_time", sim_time=False
)
# create a clock which we will publish manually, set enabled to false to make it manually controlled
result, prim = omni.kit.commands.execute(
"ROSBridgeCreateClock", path="/ROS_Clock_Manual", clock_topic="/manual_time", sim_time=True, enabled=False
)
kit.update()
kit.update()
# Define ROS callbacks
def sim_clock_callback(data):
print("sim time:", data.clock.to_sec())
def system_clock_callback(data):
print("system time:", data.clock.to_sec())
def manual_clock_callback(data):
print("manual stepped sim time:", data.clock.to_sec())
    # Create rospy node
rospy.init_node("isaac_sim_test_gripper", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
# create subscribers
sim_clock_sub = rospy.Subscriber("sim_time", Clock, sim_clock_callback)
system_clock_sub = rospy.Subscriber("system_time", Clock, system_clock_callback)
manual_clock_sub = rospy.Subscriber("manual_time", Clock, manual_clock_callback)
time.sleep(1.0)
# start simulation
kit.play()
# perform a fixed number of steps with fixed step size
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
result, status = omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock_Manual")
kit.update(1.0 / 60.0) # runs with a non-realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# perform a fixed number of steps with realtime clock
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
result, status = omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock_Manual")
kit.update() # runs with a realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# cleanup and shutdown
sim_clock_sub.unregister()
system_clock_sub.unregister()
manual_clock_sub.unregister()
kit.stop()
kit.shutdown()
| 4,151 | Python | 37.803738 | 114 | 0.687786 |
KazWong/omniverse_sample/ov_sample/python_samples/ros/carter_stereo.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": False,
}
if __name__ == "__main__":
# Example ROS bridge sample showing manual control over messages
kit = OmniKitHelper(config=CONFIG)
import omni
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from pxr import Sdf
# enable ROS bridge extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
# Locate /Isaac folder on nucleus server to load sample
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder, exiting")
exit()
usd_path = nucleus_server + "/Isaac/Samples/ROS/Scenario/carter_warehouse_navigation.usd"
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
# Disable all ROS components so we can demonstrate publishing manually
# Otherwise, if a component is enabled, it will publish every timestep
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path("/World/Carter_ROS/ROS_Camera_Stereo_Right.enabled"),
value=False,
prev=None,
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_Camera_Stereo_Left.enabled"), value=False, prev=None
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_Lidar.enabled"), value=False, prev=None
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_DifferentialBase.enabled"), value=False, prev=None
)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=Sdf.Path("/World/Carter_ROS/ROS_Carter_Lidar_Broadcaster.enabled"),
value=False,
prev=None,
)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/Carter_ROS/ROS_Carter_Broadcaster.enabled"), value=False, prev=None
)
omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/ROS_Clock.enabled"), value=False, prev=None)
kit.play()
kit.update(1.0 / 60.0)
# Tick all of the components once to make sure all of the ROS nodes are initialized
# For cameras this also handles viewport initialization etc.
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Right")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Left")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Lidar")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_DifferentialBase")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Lidar_Broadcaster")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Broadcaster")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/ROS_Clock")
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
kit.update(1.0 / 60.0)
# Dock the second camera window
right_viewport = omni.ui.Workspace.get_window("Viewport")
left_viewport = omni.ui.Workspace.get_window("Viewport_2")
if right_viewport is not None and left_viewport is not None:
left_viewport.dock_in(right_viewport, omni.ui.DockPosition.LEFT)
# Create a rostopic to publish message to spin robot in place
# Note that this is not the system level rospy, but one compiled for omniverse
from geometry_msgs.msg import Twist
import rospy
rospy.init_node("carter_stereo", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
pub = rospy.Publisher("cmd_vel", Twist, queue_size=10)
frame = 0
while kit.app.is_running():
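# Publishing rates used below (in simulated time, since each update advances 1/60 s):
# the clock is ticked every frame (60 Hz), the lidar, differential base and TF broadcasters
# every 2 frames (30 Hz), and the stereo cameras every 60 frames (1 Hz).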
# Run with a fixed step size
kit.update(1.0 / 60.0)
# Publish clock every frame
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/ROS_Clock")
# publish TF and Lidar every 2 frames
if frame % 2 == 0:
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Lidar")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_DifferentialBase")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Lidar_Broadcaster")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Carter_Broadcaster")
# because we only tick the differential base component every two frames, we can also publish the ROS message at the same rate
message = Twist()
message.angular.z = 0.2 # spin in place
pub.publish(message)
# Publish cameras every 60 frames or one second of simulation
if frame % 60 == 0:
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Right")
omni.kit.commands.execute("RosBridgeTickComponent", path="/World/Carter_ROS/ROS_Camera_Stereo_Left")
frame = frame + 1
pub.unregister()
rospy.signal_shutdown("carter_stereo complete")
kit.stop()
kit.shutdown()
| 6,107 | Python | 45.984615 | 137 | 0.700508 |
KazWong/omniverse_sample/ov_sample/python_samples/ros/moveit.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import carb
from omni.isaac.python_app import OmniKitHelper
FRANKA_STAGE_PATH = "/Franka"
FRANKA_USD_PATH = "/Isaac/Robots/Franka/franka_alt_fingers.usd"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Room/simple_room.usd"
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": False,
}
def wait_load_stage():
# Wait two frames so stage starts loading
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
if __name__ == "__main__":
# Example ROS bridge sample demonstrating the manual loading of stages
# and creation of ROS components
kit = OmniKitHelper(config=CONFIG)
import omni
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.isaac.utils.scripts.scene_utils import create_background
from pxr import Gf
# enable ROS bridge extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
# Locate /Isaac folder on nucleus server to load environment and robot stages
result, _nucleus_path = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder, exiting")
exit()
# Initialize extension and UI elements
_viewport = omni.kit.viewport.get_default_viewport_window()
_usd_context = omni.usd.get_context()
# Preparing stage
_viewport.set_camera_position("/OmniverseKit_Persp", 120, 120, 80, True)
_viewport.set_camera_target("/OmniverseKit_Persp", 0, 0, 50, True)
_stage = _usd_context.get_stage()
# Loading the simple_room environment
background_asset_path = _nucleus_path + BACKGROUND_USD_PATH
create_background(_stage, background_asset_path, background_path=BACKGROUND_STAGE_PATH, offset=Gf.Vec3d(0, 0, 0))
wait_load_stage()
# Loading the franka robot USD
franka_asset_path = _nucleus_path + FRANKA_USD_PATH
prim = _stage.DefinePrim(FRANKA_STAGE_PATH, "Xform")
prim.GetReferences().AddReference(franka_asset_path)
rot_mat = Gf.Matrix3d(Gf.Rotation((0, 0, 1), 90))
omni.kit.commands.execute(
"TransformPrimCommand",
path=prim.GetPath(),
old_transform_matrix=None,
new_transform_matrix=Gf.Matrix4d().SetRotate(rot_mat).SetTranslateOnly(Gf.Vec3d(0, -64, 0)),
)
wait_load_stage()
# Loading all ROS components initially as disabled so we can demonstrate publishing manually
# Otherwise, if a component is enabled, it will publish every timestep
# Load ROS Clock
omni.kit.commands.execute("ROSBridgeCreateClock", path="/ROS_Clock", enabled=False)
# Load Joint State
omni.kit.commands.execute(
"ROSBridgeCreateJointState", path="/ROS_JointState", articulation_prim_rel=[FRANKA_STAGE_PATH], enabled=False
)
# Load Pose Tree
omni.kit.commands.execute(
"ROSBridgeCreatePoseTree", path="/ROS_PoseTree", target_prims_rel=[FRANKA_STAGE_PATH], enabled=False
)
kit.play()
kit.update(1.0 / 60.0)
# Tick all of the components once to make sure all of the ROS nodes are initialized
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_JointState")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_PoseTree")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock")
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
kit.update(1.0 / 60.0)
kit.play()
while kit.app.is_running():
# Run with a fixed step size
kit.update(1.0 / 60.0)
# Publish clock, TF and JointState each frame
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_JointState")
omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_PoseTree")
kit.stop()
kit.shutdown()
| 4,589 | Python | 35.72 | 117 | 0.70146 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/franka_articulation.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# This sample loads an articulation and prints its information
kit = OmniKitHelper(config=CONFIG)
import omni
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
stage = kit.get_stage()
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
asset_path = nucleus_server + "/Isaac/Robots/Franka/franka_alt_fingers.usd"
omni.usd.get_context().open_stage(asset_path)
# start simulation
kit.play()
# perform timestep
kit.update(1.0 / 60.0)
dc = _dynamic_control.acquire_dynamic_control_interface()
# Get handle to articulation
art = dc.get_articulation("/panda")
if art == _dynamic_control.INVALID_HANDLE:
print("*** '%s' is not an articulation" % "/panda")
else:
# Print information about articulation
root = dc.get_articulation_root_body(art)
print(str("Got articulation handle %d \n" % art) + str("--- Hierarchy\n"))
body_states = dc.get_articulation_body_states(art, _dynamic_control.STATE_ALL)
print(str("--- Body states:\n") + str(body_states) + "\n")
dof_states = dc.get_articulation_dof_states(art, _dynamic_control.STATE_ALL)
print(str("--- DOF states:\n") + str(dof_states) + "\n")
dof_props = dc.get_articulation_dof_properties(art)
print(str("--- DOF properties:\n") + str(dof_props) + "\n")
# Simulate robot coming to a rest configuration
for i in range(100):
kit.update(1.0 / 60.0)
# Simulate robot for a fixed number of frames and specify a joint position target
for i in range(100):
dof_ptr = dc.find_articulation_dof(art, "panda_joint2")
# This should be called each frame of simulation if state on the articulation is being changed.
dc.wake_up_articulation(art)
# Set joint position target
dc.set_dof_position_target(dof_ptr, -1.5)
kit.update(1.0 / 60.0)
kit.stop()
kit.shutdown()
| 2,778 | Python | 37.068493 | 103 | 0.674946 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/control_your_robot.py | # https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/controlling_robot.html
import os
from omni.isaac.python_app import OmniKitHelper
# Start the Kit application first so that the omni.* APIs are available
kit = OmniKitHelper(config={"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit', "headless": False})
import omni
# start timeline playback, which starts simulation
omni.timeline.get_timeline_interface().play()
| 202 | Python | 24.374997 | 83 | 0.811881 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/load_stage.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import carb
import omni
# This sample loads a usd stage and starts simulation
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"sync_loads": True,
"headless": False,
"renderer": "RayTracedLighting",
}
if __name__ == "__main__":
import argparse
# Set up command line arguments
parser = argparse.ArgumentParser("Usd Load sample")
parser.add_argument("--usd_path", type=str, help="Path to usd file", required=True)
parser.add_argument("--headless", default=False, action="store_true", help="Run stage headless")
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
# Start the omniverse application
CONFIG["headless"] = args.headless
kit = OmniKitHelper(config=CONFIG)
# Locate /Isaac folder on nucleus server to load sample
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder, exiting")
exit()
asset_path = nucleus_server + "/Isaac"
usd_path = asset_path + args.usd_path
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
kit.play()
# Run in test mode, exit after a fixed number of steps
if args.test is True:
for i in range(10):
# Run in realtime mode, we don't specify the step size
kit.update()
else:
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
kit.update()
kit.stop()
kit.shutdown()
| 2,421 | Python | 34.101449 | 100 | 0.678232 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/livestream.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import omni
# This sample enables a livestream server to connect to when running headless
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"window_width": 1920,
"window_height": 1080,
"headless": True,
"renderer": "RayTracedLighting",
"display_options": 3807, # Set display options to show default grid
}
if __name__ == "__main__":
# Start the omniverse application
kit = OmniKitHelper(config=CONFIG)
# Enable Livestream extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
kit.set_setting("/app/window/drawMouse", True)
kit.set_setting("/app/livestream/proto", "ws")
ext_manager.set_extension_enabled_immediate("omni.kit.livestream.core", True)
ext_manager.set_extension_enabled_immediate("omni.kit.livestream.native", True)
# Run until closed
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
kit.update()
kit.stop()
kit.shutdown()
| 1,538 | Python | 34.790697 | 83 | 0.714564 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/change_resolution.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import random
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Simple example showing how to change resolution
kit = OmniKitHelper(config=CONFIG)
kit.update(1.0 / 60.0)
for i in range(100):
width = random.randint(128, 1980)
height = random.randint(128, 1980)
kit.set_setting("/app/renderer/resolution/width", width)
kit.set_setting("/app/renderer/resolution/height", height)
kit.update(1.0 / 60.0)
print(f"resolution set to: {width}, {height}")
# cleanup
kit.shutdown()
| 1,160 | Python | 34.181817 | 76 | 0.701724 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/urdf_import.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# URDF import, configuration and simulation sample
kit = OmniKitHelper(config=CONFIG)
import omni.kit.commands
from pxr import Sdf, Gf, UsdPhysics, UsdLux, PhysxSchema
# Setting up import configuration:
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = False
import_config.convex_decomp = False
import_config.import_inertia_tensor = True
import_config.fix_base = False
# Get path to extension data:
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_id = ext_manager.get_enabled_extension_id("omni.isaac.urdf")
extension_path = ext_manager.get_extension_path(ext_id)
# Import URDF
omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=extension_path + "/data/urdf/robots/carter/urdf/carter.urdf",
import_config=import_config,
)
# Get stage handle
stage = omni.usd.get_context().get_stage()
# Enable physics
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
# Set gravity
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set solver settings
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Add ground plane
omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, -50),
color=Gf.Vec3f(0.5),
)
# Add lighting
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
# Get handle to the Drive API for both wheels
left_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/left_wheel"), "angular")
right_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/right_wheel"), "angular")
# Set the velocity drive target in degrees/second
left_wheel_drive.GetTargetVelocityAttr().Set(150)
right_wheel_drive.GetTargetVelocityAttr().Set(150)
# Set the drive damping, which controls the strength of the velocity drive
left_wheel_drive.GetDampingAttr().Set(15000)
right_wheel_drive.GetDampingAttr().Set(15000)
# Set the drive stiffness, which controls the strength of the position drive
# In this case because we want to do velocity control this should be set to zero
left_wheel_drive.GetStiffnessAttr().Set(0)
right_wheel_drive.GetStiffnessAttr().Set(0)
# Start simulation
kit.play()
# perform simulation
for frame in range(100):
kit.update(1.0 / 60.0)
# Shutdown and exit
kit.stop()
kit.shutdown()
| 3,757 | Python | 36.58 | 115 | 0.713335 |
KazWong/omniverse_sample/ov_sample/python_samples/simple/time_stepping.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import carb
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Example usage, with step size test
kit = OmniKitHelper(config=CONFIG)
import omni.physx
from pxr import UsdPhysics, Sdf
UsdPhysics.Scene.Define(kit.get_stage(), Sdf.Path("/World/physicsScene"))
# Create callbacks to both editor and physics step callbacks
def editor_update(e: carb.events.IEvent):
dt = e.payload["dt"]
print("kit update step:", dt, "seconds")
def physics_update(dt: float):
print("physics update step:", dt, "seconds")
# start simulation
kit.play()
# assign callbacks
update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(editor_update)
physics_sub = omni.physx.acquire_physx_interface().subscribe_physics_step_events(physics_update)
# perform step experiments
print(f"Rendering and Physics with {1} second step size:")
kit.update(1.0)
print(f"Rendering and Physics with {1/60} seconds step:")
kit.update(1.0 / 60.0)
print(f"Rendering {1/30} seconds step size and Physics {1/120} seconds step size:")
kit.update(1.0 / 30.0, 1.0 / 120.0, 4)
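# Inferred from the values used here: the second argument is the physics step size and the
# third the number of physics substeps, so 4 substeps of 1/120 s cover each 1/30 s rendered frame.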
# cleanup
update_sub = None
physics_sub = None
kit.stop()
kit.shutdown()
| 1,873 | Python | 33.072727 | 107 | 0.695675 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/gtc2020_track_utils.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
from PIL import Image
# TODO : This is custom, specific to the GTC2020 Jetracer course.
# Make a more general solution.
def line_seg_closest_point(v0, v1, p0):
# Project p0 onto (v0, v1) line, then clamp to line segment
d = v1 - v0
q = p0 - v0
t = np.dot(q, d) / np.dot(d, d)
t = np.clip(t, 0, 1)
return v0 + t * d
def line_seg_distance(v0, v1, p0):
p = line_seg_closest_point(v0, v1, p0)
return np.linalg.norm(p0 - p)
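# Worked example (illustrative values, not taken from the track): for the segment
# v0=(0, 0) -> v1=(1, 0) and the point p0=(0.5, 1.0), the projection parameter is t = 0.5,
# the clamped closest point is (0.5, 0.0), and line_seg_distance returns 1.0.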
# Canonical arc is centered at origin, and goes from 0 to a0 radians
def canonical_arc_distance(R, a0, x):
a = np.arctan2(x[1], x[0])
if a < 0:
a = a + 2 * np.pi
if a > a0:
if a < a0 / 2 + np.pi:
a = a0
else:
a = 0
p = R * np.array([np.cos(a), np.sin(a)])
return np.linalg.norm(x - p)
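# Illustrative case: with R=1, a0=pi/2 and x=(0, 2), the point's angle is pi/2, which lies
# on the arc, so the closest arc point is (0, 1) and the returned distance is 1.0.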
def arc_distance(c, r, a0, a1, x):
# Point relative to arc origin
x0 = x - c
# Rotate point to canonical angle (where arc starts at 0)
c = np.cos(-a0)
s = np.sin(-a0)
R = np.array([[c, -s], [s, c]])
x0 = np.dot(R, x0)
return canonical_arc_distance(r, a1 - a0, x0)
def closest_point_arc(c, r, a0, a1, x):
# Direction to point
x0 = x - c
x0 = x0 / np.linalg.norm(x0)
# print(c, x0, r, c + x0 * r)
return c + x0 * r
# The forward direction at the closest point on an arc
def closest_point_arc_direction(c, r, a0, a1, x):
# Direction to point
x0 = x - c
x0 = x0 / np.linalg.norm(x0)
# The tangent is unit circle point rotated pi/2 radians
return np.array([-x0[1], x0[0]])
def arc_endpoints(c, r, a0, a1):
c0 = np.cos(a0)
s0 = np.sin(a0)
c1 = np.cos(a1)
s1 = np.sin(a1)
return c + r * np.array([[c0, s0], [c1, s1]])
# Measurements (in meters)
m0 = 7.620
m1 = 10.668
m2 = 5.491
m3 = 3.048
m4 = 4.348
m5 = 5.380
# Track width
w = 1.22
w_2 = w / 2
# Arc arrays
arc_center = np.zeros((4, 2))
arc_radius = np.zeros(4)
arc_angles = np.zeros((4, 2))
# Arcs
# Bottom left
arc_center[0] = np.array([w, w])
arc_radius[0] = w_2
arc_angles[0] = [np.pi, np.pi * 1.5]
# Top left
arc_center[1] = np.array([m3, m0])
arc_radius[1] = m3 - w_2
arc_angles[1] = [1.75 * np.pi, 3 * np.pi]
ep1 = arc_endpoints(arc_center[1], arc_radius[1], arc_angles[1][0], arc_angles[1][1])
# Others
arc_center[2] = np.array([m5, m4])
arc_radius[2] = 0.5 * (2.134 + 0.914)
arc_angles[2] = [0.75 * np.pi, 1.25 * np.pi]
ep2 = arc_endpoints(arc_center[2], arc_radius[2], arc_angles[2][0], arc_angles[2][1])
arc_center[3] = np.array([m2, w])
arc_radius[3] = w_2
arc_angles[3] = [np.pi * 1.5, np.pi * 2.25]
ep3 = arc_endpoints(arc_center[3], arc_radius[3], arc_angles[3][0], arc_angles[3][1])
# line segment points
line_verts = [
np.array([w_2, w]),
np.array([w_2, m0]),
ep1[0],
ep2[0],
ep2[1],
ep3[1],
np.array([m2, w_2]),
np.array([w, w_2]),
]
def random_track_point():
# TODO : Refactor these dimensions, which show up in multiple places
p = np.random.random(2) * [6.711, 10.668]
result = track_segment_closest_point(p)
return result * 100 # convert to cm. TODO standardize all entry points to cm
# Minimum distances to all segments of the track
def track_segment_distance(p):
d = np.zeros(8)
d[0] = line_seg_distance(line_verts[0], line_verts[1], p)
d[1] = line_seg_distance(line_verts[2], line_verts[3], p)
d[2] = line_seg_distance(line_verts[4], line_verts[5], p)
d[3] = line_seg_distance(line_verts[6], line_verts[7], p)
d[4] = arc_distance(arc_center[0], arc_radius[0], arc_angles[0][0], arc_angles[0][1], p)
d[5] = arc_distance(arc_center[1], arc_radius[1], arc_angles[1][0], arc_angles[1][1], p)
d[6] = arc_distance(arc_center[2], arc_radius[2], arc_angles[2][0], arc_angles[2][1], p)
d[7] = arc_distance(arc_center[3], arc_radius[3], arc_angles[3][0], arc_angles[3][1], p)
return d
def track_segment_closest_point(p):
d = track_segment_distance(p)
# If a line segment is the closest
if np.min(d[:4]) < np.min(d[4:]):
idx = np.argmin(d[:4], axis=0)
return line_seg_closest_point(line_verts[idx * 2], line_verts[idx * 2 + 1], p)
# If an arc is the closest
else:
idx = np.argmin(d[4:], axis=0)
return closest_point_arc(arc_center[idx], arc_radius[idx], arc_angles[idx][0], arc_angles[idx][1], p)
# Distance to closest point on the track
def center_line_dist(p):
p = 0.01 * p # Convert from cm to m (the track geometry above is defined in meters)
return np.min(track_segment_distance(p))
# Forward vector at the closest point on the center line
def closest_point_track_direction(p):
p = 0.01 * p # Convert from cm to m (the track geometry above is defined in meters)
d = track_segment_distance(p)
# If a line segment is the closest
if np.min(d[:4]) < np.min(d[4:]):
idx = np.argmin(d[:4], axis=0)
v = line_verts[idx * 2 + 1] - line_verts[idx * 2]
return v / np.linalg.norm(v)
# If an arc is the closest
else:
idx = np.argmin(d[4:], axis=0)
v = closest_point_arc_direction(arc_center[idx], arc_radius[idx], arc_angles[idx][0], arc_angles[idx][1], p)
# TODO : All arcs are defined counter-clockwise,
# but this doesn't always represent the forward direction on the track.
# This is a hack to correct the tangent vector on all but one of the arcs.
if idx != 2:
v = -v
return v
LANE_WIDTH = 0.7 # the whole track is w = 1.22 m wide, so out of bounds starts at 1.22/2 = 0.61 m from the center line; 0.7 allows a small margin
TRACK_DIMS = [671, 1066] # the track is within (0, 0) to (671.1 cm, 1066.8 cm)
def is_racing_forward(prev_pose, curr_pose):
prev_pose = 0.01 * prev_pose
curr_pose = 0.01 * curr_pose
bottom_left_corner = np.array([0, 0])
top_left_corner = np.array([0, 10.668])
top_right_corner = np.array([6.711, 10.668])
bottom_right_corner = np.array([6.711, 0])
d0 = line_seg_distance(bottom_left_corner, top_left_corner, curr_pose)
d1 = line_seg_distance(top_left_corner, top_right_corner, curr_pose)
d2 = line_seg_distance(top_right_corner, bottom_right_corner, curr_pose)
d3 = line_seg_distance(bottom_right_corner, bottom_left_corner, curr_pose)
min_d = np.min([d0, d1, d2, d3])
which_side = np.array([0, 0])
if min_d == d0:
which_side = top_left_corner - bottom_left_corner
elif min_d == d1:
which_side = top_right_corner - top_left_corner
elif min_d == d2:
which_side = bottom_right_corner - top_right_corner
elif min_d == d3:
which_side = bottom_left_corner - bottom_right_corner
which_size_unit = which_side / np.linalg.norm(which_side)
curr_vel = curr_pose - prev_pose
curr_vel_norm = np.linalg.norm(curr_vel)
curr_vel_unit = np.array([0, 0])
# checking divide by zero
if curr_vel_norm:
curr_vel_unit = curr_vel / curr_vel_norm
return np.dot(curr_vel_unit, which_size_unit)
def is_outside_track_boundary(curr_pose):
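# Note: despite the name, this returns True while the pose is still within LANE_WIDTH of the
# center line (i.e. inside the track); callers negate the result to detect leaving the track.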
dist = center_line_dist(curr_pose)
return dist < LANE_WIDTH
if __name__ == "__main__":
print("Generating test PNGs")
# scale
s = 0.02
H = int(10668 * s)
W = int(6711 * s)
d = np.zeros((H, W))
fwd = np.zeros((H, W, 3))
h = np.zeros((H, W))
print(H, W)
for _ in range(10000):
p_scaled = np.random.random(2) * [W, H]
p_meters = p_scaled / s / 1000.0
# p_proj = line_seg_closest_point(line_verts[6], line_verts[7], p_meters)
p_proj = track_segment_closest_point(p_meters)
# print(h.shape, p_scaled, p_meters, p_proj, p_proj * s)
p_proj = p_proj + np.random.normal([0, 0], 0.1)
idx = p_proj * s * 1000.0
idx = np.floor(idx)
idx = np.clip(idx, [0, 0], [W - 1, H - 1]) # HACK
idx = idx.astype("int")
h[idx[1], idx[0]] = h[idx[1], idx[0]] + 1
for i in range(H):
y = ((i + 0.5) / s) / 10.0
if i % 10 == 0:
print("{:0.1f}%".format(i / H * 100))
for j in range(W):
x = ((j + 0.5) / s) / 10.0
p = np.array([x, y])
d[i, j] = center_line_dist(p)
f = closest_point_track_direction(p)
fwd[i, j] = np.array([0.5 * (f[0] + 1), 0.5 * (f[1] + 1), 0])
print("100.0%")
# Images have zero at the top, so we flip vertically
d = np.flipud(d)
fwd = np.flip(fwd, axis=0)
h = np.flipud(h)
# Distance function
im = Image.fromarray((d * 255 / np.max(d)).astype("uint8"))
im.save("dist.png")
# Track forward vector
im = Image.fromarray((fwd * 255).astype("uint8"), "RGB")
im.save("fwd.png")
# Track forward vector X
im = Image.fromarray((fwd[:, :, 0] * 255).astype("uint8"))
im.save("fwd_x.png")
# Track forward vector Y
im = Image.fromarray((fwd[:, :, 1] * 255).astype("uint8"))
im.save("fwd_y.png")
# H
h = h / np.max(h)
im = Image.fromarray((h * 255).astype("uint8"))
im.save("h.png")
| 9,404 | Python | 25.643059 | 116 | 0.581986 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/jetracer_env.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
from torchvision.transforms import ColorJitter
import PIL
import numpy as np
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf, Usd, Semantics
import os
import json
import time
import atexit
import asyncio
import numpy as np
import random
import matplotlib.pyplot as plt
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.python_app import OmniKitHelper
from omni.isaac.synthetic_utils import SyntheticDataHelper
from jetracer import Jetracer
from track_environment import Environment
from gtc2020_track_utils import *
import gym
from gym import spaces
class JetracerEnv:
metadata = {"render.modes": ["human"]}
# TODO : Extract more training options
def __init__(
self,
omni_kit,
z_height=0,
max_resets=10,
updates_per_step=3,
steps_per_rollout=500,
mirror_mode=False,
backwards_term_mode=0,
reward_mode=0,
):
self.MIRROR_MODE = mirror_mode
self.BACKWARDS_TERMINATION_MODE = backwards_term_mode
self.REWARD_MODE = reward_mode
print("MIRROR_MODE = {}".format(self.MIRROR_MODE))
print("BACKWARDS_TERMINATION_MODE = {}".format(self.BACKWARDS_TERMINATION_MODE))
print("REWARD_MODE = {}".format(self.REWARD_MODE))
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=255, shape=(224, 224, 6), dtype=np.uint8)
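# The 6 channels are two stacked RGB frames: the current camera image and the previous one
# concatenated along the channel axis (see the image assembly in reset() and step() below).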
self.color_jitter = ColorJitter(0.1, 0.05, 0.05, 0.05)
self.noise = 0.05
self.dt = 1 / 30.0
self.omniverse_kit = omni_kit
self.sd_helper = SyntheticDataHelper()
self.roads = Environment(self.omniverse_kit)
# make environment z up
self.omniverse_kit.set_up_axis(UsdGeom.Tokens.z)
# generate roads
self.shape = [6, 6]
self.roads.generate_road(self.shape)
self.roads.generate_lights()
# randomize once to initialize stage
# the following two lines must be called prior to Jetracer initialization
# any DR related setup calls should occur before this point
self.omniverse_kit.update(1 / 60.0)
self.roads.dr.randomize_once()
# spawn robot
self.jetracer = Jetracer(self.omniverse_kit)
self.initial_loc = self.roads.get_valid_location()
self.jetracer.spawn(Gf.Vec3d(self.initial_loc[0], self.initial_loc[1], 5), 0)
self.prev_pose = [0, 0, 0]
self.current_pose = [0, 0, 0]
# switch kit camera to jetracer camera
self.jetracer.activate_camera()
# start simulation
self.omniverse_kit.play()
# Step simulation so that objects fall to rest
# wait until all materials are loaded
frame = 0
print("simulating physics...")
while frame < 60 or self.omniverse_kit.is_loading():
self.omniverse_kit.update(self.dt)
frame = frame + 1
print("done after frame: ", frame)
self.initialized = False
self.numsteps = 0
self.numresets = 0
self.maxresets = 10
# set this to 1 after around 200k steps to randomize less
# self.maxresets = 1
# Randomly mirror horizontally
self.update_mirror_mode()
def update_mirror_mode(self):
# Mirror if mode is enabled and we randomly sample True
self.mirror_mode = self.MIRROR_MODE & random.choice([False, True])
def calculate_reward(self):
# Current and last positions
pose = np.array([self.current_pose[0], self.current_pose[1]])
prev_pose = np.array([self.prev_pose[0], self.prev_pose[1]])
# Finite difference velocity calculation
vel = pose - prev_pose
vel_norm = vel
vel_magnitude = np.linalg.norm(vel)
if vel_magnitude > 0.0:
vel_norm = vel / vel_magnitude
# Distance from the center of the track
dist = center_line_dist(pose)
self.dist = dist
# racing_forward = is_racing_forward(prev_pose, pose)
# reward = racing_forward * self.current_speed * np.exp(-dist ** 2 / 0.05 ** 2)
fwd_dir = closest_point_track_direction(pose)
fwd_dot = np.dot(fwd_dir, vel_norm)
if self.REWARD_MODE == 0:
reward = fwd_dot * self.current_speed * np.exp(-dist ** 2 / 0.05 ** 2)
elif self.REWARD_MODE == 1:
reward = fwd_dot * self.current_speed
return reward
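# Illustrative numbers (not from a real run): with fwd_dot = 1.0, speed = 10.0 and
# dist = 0.05, REWARD_MODE 0 gives 10.0 * exp(-1) ~= 3.68, while REWARD_MODE 1 gives 10.0.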
def is_dead(self):
return not is_outside_track_boundary(np.array([self.current_pose[0], self.current_pose[1]]))
def transform_action(self, action):
# If mirrored, swap steering direction
if self.mirror_mode:
action[1] = -action[1]
return action
def transform_state_image(self, im):
# If enabled, mirror image horizontally
if self.mirror_mode:
return np.flip(im, axis=1)
return im
def reset(self):
# Randomly mirror horizontally
self.update_mirror_mode()
if self.numresets % self.maxresets == 0:
self.roads.reset(self.shape)
if not self.initialized:
state, reward, done, info, = self.step([0, 0])
self.initialized = True
# Random track point in cm, with a 10 cm stddev gaussian offset
loc = random_track_point()
loc = loc + np.random.normal([0.0, 0.0], 10.0)
# Forward direction at that point
fwd = closest_point_track_direction(loc)
# Forward angle in degrees, with a 10 degree stddev gaussian offset
rot = np.arctan2(fwd[1], fwd[0])
rot = rot * 180.0 / np.pi
rot = rot + np.random.normal(10.0)
self.jetracer.teleport(Gf.Vec3d(loc[0], loc[1], 5), rot, settle=True)
obs = self.jetracer.observations()
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
if self.numresets % self.maxresets == 0:
frame = 0
while self.omniverse_kit.is_loading(): # or frame < 750:
self.omniverse_kit.update(self.dt)
frame += 1
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
currentState = gt["rgb"][:, :, :3]
currentState = self.transform_state_image(currentState)
img = np.concatenate((currentState, currentState), axis=2)
img = np.clip((255 * self.noise * np.random.randn(224, 224, 6) + img.astype(np.float)), 0, 255).astype(np.uint8)
self.numsteps = 0
self.previousState = currentState
self.numresets += 1
return img
def is_driving_backwards(self):
# TODO : Refactor, the bulk of this code is shared with the reward function.
# Also, find out at what point in an iteration this is called,
# compared to the reward, physics and stuff.
# If off by a timestep it's close enough, probably won't cause any issues.
# Current and last positions
pose = np.array([self.current_pose[0], self.current_pose[1]])
prev_pose = np.array([self.prev_pose[0], self.prev_pose[1]])
# Finite difference velocity calculation
vel = pose - prev_pose
vel_norm = vel
vel_magnitude = np.linalg.norm(vel)
if vel_magnitude > 0.0:
vel_norm = vel / vel_magnitude
# Forward direction on the track
fwd_dir = closest_point_track_direction(pose)
# Normalized velocity projected onto the forward direction
fwd_dot = np.dot(fwd_dir, vel_norm)
# Considered backwards once the velocity points more than 7*pi/8 radians away from the track's forward direction (fwd_dot < cos(7*pi/8))
return fwd_dot < np.cos(7.0 * np.pi / 8.0)
def step(self, action):
print("Number of steps ", self.numsteps)
# print("Action ", action)
transformed_action = self.transform_action(action)
self.jetracer.command(transformed_action)
frame = 0
total_reward = 0
reward = 0
while frame < 3:
self.omniverse_kit.update(self.dt)
obs = self.jetracer.observations()
self.prev_pose = self.current_pose
self.current_pose = obs["pose"]
self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
self.current_forward_velocity = obs["local_linear_velocity"][0]
reward = self.calculate_reward()
done = self.is_dead()
total_reward += reward
frame = frame + 1
viewport = omni.kit.viewport.get_default_viewport_window()
gt = self.sd_helper.get_groundtruth(["rgb"], viewport)
currentState = gt["rgb"][:, :, :3]
currentState = self.transform_state_image(currentState)
if not self.initialized:
self.previousState = currentState
img = np.concatenate((currentState, self.previousState), axis=2)
img = np.clip((255 * self.noise * np.random.randn(224, 224, 6) + img.astype(np.float)), 0, 255).astype(np.uint8)
self.previousState = currentState
other = np.array(
[*obs["pose"], *obs["linear_velocity"], *obs["local_linear_velocity"], *obs["angular_velocity"]]
)
other = np.expand_dims(other.astype(float), 0)
self.numsteps += 1
if done:
print("robot is dead")
if self.numsteps > 500:
done = True
print("robot stepped 500 times")
if self.dist > LANE_WIDTH:
print("robot out of bounds. dist = ", self.dist)
done = True
if self.BACKWARDS_TERMINATION_MODE == 0:
if self.current_forward_velocity <= -35:
print("robot was going backwards forward velocity = ", self.current_forward_velocity)
done = True
elif self.BACKWARDS_TERMINATION_MODE == 1:
if self.is_driving_backwards():
print("Robot was driving backwards")
done = True
return img, reward, done, {}
| 10,660 | Python | 32.420063 | 120 | 0.61257 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/jetracer.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
from pxr import UsdGeom, Gf
import numpy as np
class Jetracer:
def __init__(self, omni_kit):
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
# Enable this after stage is loaded to prevent errors
ext_manager = self.omni_kit.app.get_extension_manager()
ext_manager.set_extension_enabled("omni.physx.vehicle", True)
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.usd_path = nucleus_server + "/Isaac/Robots/Jetracer/jetracer.usd"
self.robot_prim = None
self._dynamic_control = _dynamic_control
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.ar = None
# rotation is in degrees
def spawn(self, location, rotation):
stage = self.omni_kit.get_stage()
prefix = "/World/Robot/Jetracer"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
self.robot_prim = stage.DefinePrim(prim_path, "Xform")
self.robot_prim.GetReferences().AddReference(self.usd_path)
xform = UsdGeom.Xformable(self.robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(location)
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
xform_op.Set(mat)
self.camera_path = prim_path + "/Jetracer/Vehicle/jetracer_camera"
# self.camera_path = prim_path + "Vehicle/jetracer_camera"
def teleport(self, location, rotation, settle=False):
if self.ar is None:
self.ar = self.dc.get_rigid_body(self.robot_prim.GetPath().pathString + "/Vehicle")
self.chassis = self.ar
self.dc.wake_up_rigid_body(self.ar)
rot_quat = Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation).GetQuaternion()
tf = self._dynamic_control.Transform(
location,
(rot_quat.GetImaginary()[0], rot_quat.GetImaginary()[1], rot_quat.GetImaginary()[2], rot_quat.GetReal()),
)
self.dc.set_rigid_body_pose(self.chassis, tf)
self.dc.set_rigid_body_linear_velocity(self.chassis, [0, 0, 0])
self.dc.set_rigid_body_angular_velocity(self.chassis, [0, 0, 0])
self.command((0, 0))
if settle:
frame = 0
velocity = 1
print("Settling robot...")
while velocity > 0.1 and frame < 120:
self.omni_kit.update(1.0 / 60.0)
lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
velocity = np.linalg.norm([lin_vel.x, lin_vel.y, lin_vel.z])
# print("velocity magnitude is: ", velocity)
frame = frame + 1
# print("done after frame: HERE", frame)
def activate_camera(self):
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera_path))
def command(self, motor_value):
if self.ar is None:
vehicle_path = self.robot_prim.GetPath().pathString + "/Jetracer/Vehicle"
print(vehicle_path)
self.ar = self.dc.get_rigid_body(vehicle_path)
self.chassis = self.ar
print(self.chassis)
stage = self.omni_kit.get_stage()
# for child_prim in stage.Traverse():
# print(child_prim.GetPath().pathString)
self.accelerator = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:accelerator")
self.left_steer = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:steerLeft")
self.right_steer = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:steerRight")
self.target_gear = stage.GetPrimAtPath(vehicle_path).GetAttribute("physxVehicleController:targetGear")
# TODO add brake physxVehicleController:brake
self.dc.wake_up_rigid_body(self.ar)
accel_cmd = self.wheel_speed_from_motor_value(motor_value[0])
steer_left_cmd = self.wheel_speed_from_motor_value(motor_value[1])
acceleration = max(min(accel_cmd, 1), -1)
steering = max(min(steer_left_cmd, 1), -1)
gear = 1 # going forward
if acceleration < 0:
gear = -1 # reverse
self.accelerator.Set(abs(acceleration))
self.target_gear.Set(gear)
if steering > 0:
self.right_steer.Set(steering)
else:
self.left_steer.Set(abs(steering))
# idealized motor model that converts a pwm value to a velocity
def wheel_speed_from_motor_value(self, input):
threshold = 0.05
if input >= 0:
if input > threshold:
return 1.604 * input - 0.05
else:
return 0
elif input < 0:
if input < -threshold:
return 1.725 * input + 0.0757
else:
return 0
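# Illustrative values from the piecewise model above: input 0.5 -> 1.604*0.5 - 0.05 = 0.752,
# input -0.5 -> 1.725*(-0.5) + 0.0757 = -0.7868, and any |input| <= 0.05 falls in the dead zone and maps to 0.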
def observations(self):
if self.ar is None:
self.ar = self.dc.get_rigid_body(self.robot_prim.GetPath().pathString + "/Vehicle")
self.chassis = self.ar
dc_pose = self.dc.get_rigid_body_pose(self.chassis)
dc_lin_vel = self.dc.get_rigid_body_linear_velocity(self.chassis)
dc_local_lin_vel = self.dc.get_rigid_body_local_linear_velocity(self.chassis)
dc_ang_vel = self.dc.get_rigid_body_angular_velocity(self.chassis)
return {
"pose": (dc_pose.p.x, dc_pose.p.y, dc_pose.p.z, dc_pose.r.w, dc_pose.r.x, dc_pose.r.y, dc_pose.r.z),
"linear_velocity": (dc_lin_vel.x, dc_lin_vel.y, dc_lin_vel.z),
"local_linear_velocity": (dc_local_lin_vel.x, dc_local_lin_vel.y, dc_local_lin_vel.z),
"angular_velocity": (dc_ang_vel.x, dc_ang_vel.y, dc_ang_vel.z),
}
| 6,524 | Python | 42.21192 | 117 | 0.622777 |
KazWong/omniverse_sample/ov_sample/python_samples/jetracer/track_environment.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni
import random
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from omni.isaac.synthetic_utils import DomainRandomization
from gtc2020_track_utils import *
class Environment:
def __init__(self, omni_kit, z_height=0):
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
self.omni_kit = omni_kit
self.find_nucleus_server = find_nucleus_server
result, nucleus_server = self.find_nucleus_server()
if result is False:
carb.log_error(
"Could not find nucleus server with /Isaac folder. Please specify the correct nucleus server in apps/omni.isaac.sim.python.kit"
)
return
self.texture_list = [
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/marble_tile.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_a.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/picture_b.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/textured_wall.png",
nucleus_server + "/Isaac/Samples/DR/Materials/Textures/checkered_color.png",
]
self.prims = [] # list of spawned tiles
self.height = z_height # height of the ground tiles
self.state = None
# because the ground plane is what the robot drives on, we only do this once. We can then re-generate the road as often as we need without impacting physics
self.setup_physics()
contents = omni.client.list(nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/")[1]
for entry in contents:
self.texture_list.append(
nucleus_server + "/Isaac/Props/Sortbot_Housing/Materials/Textures/" + entry.relative_path
)
contents = omni.client.list(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/")[1]
names = []
loaded_paths = []
for entry in contents:
if not entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
names.append(nucleus_server + "/Isaac/Props/YCB/Axis_Aligned/" + entry.relative_path)
loaded_paths.append("/World/DR/mesh_component/mesh_" + entry.relative_path[0:-4])
print(loaded_paths)
self.omni_kit.create_prim("/World/Floor", "Xform")
stage = omni.usd.get_context().get_stage()
cubeGeom = UsdGeom.Cube.Define(stage, "/World/Floor/thefloor")
cubeGeom.CreateSizeAttr(300)
offset = Gf.Vec3f(75, 75, -150.1)
cubeGeom.AddTranslateOp().Set(offset)
prims = []
self.dr = DomainRandomization()
self.dr.toggle_manual_mode()
self.dr.create_mesh_comp(prim_paths=prims, mesh_list=names, mesh_range=[1, 1])
self.omni_kit.update(1 / 60.0)
print("waiting for materials to load...")
while self.omni_kit.is_loading():
self.omni_kit.update(1 / 60.0)
lights = []
for i in range(5):
prim_path = "/World/Lights/light_" + str(i)
self.omni_kit.create_prim(
prim_path,
"SphereLight",
translation=(0, 0, 200),
rotation=(0, 0, 0),
attributes={"radius": 10, "intensity": 1000.0, "color": (1.0, 1.0, 1.0)},
)
lights.append(prim_path)
self.dr.create_movement_comp(
prim_paths=loaded_paths, min_range=(0, 0, 15), max_range=(TRACK_DIMS[0], TRACK_DIMS[1], 15)
)
self.dr.create_rotation_comp(prim_paths=loaded_paths)
self.dr.create_visibility_comp(prim_paths=loaded_paths, num_visible_range=(15, 15))
self.dr.create_light_comp(light_paths=lights)
self.dr.create_movement_comp(
prim_paths=lights, min_range=(0, 0, 30), max_range=(TRACK_DIMS[0], TRACK_DIMS[1], 30)
)
self.dr.create_texture_comp(
prim_paths=["/World/Floor"], enable_project_uvw=True, texture_list=self.texture_list
)
def generate_lights(self):
# TODO: center this onto the track
prim_path = omni.usd.get_stage_next_free_path(self.omni_kit.get_stage(), "/World/Env/Light", False)
# self.prims.append(prim_path)
# LOCMOD revisit (don't add so it won't be removed on reset)
self.omni_kit.create_prim(
prim_path,
"RectLight",
translation=(75, 75, 100),
rotation=(0, 0, 0),
attributes={"height": 150, "width": 150, "intensity": 2000.0, "color": (1.0, 1.0, 1.0)},
)
def reset(self, shape):
# this deletes objects in self.prims
stage = omni.usd.get_context().get_stage()
for layer in stage.GetLayerStack():
edit = Sdf.BatchNamespaceEdit()
for path in self.prims:
prim_spec = layer.GetPrimAtPath(path)
if prim_spec is None:
continue
parent_spec = prim_spec.realNameParent
if parent_spec is not None:
edit.Add(path, Sdf.Path.emptyPath)
layer.Apply(edit)
self.prims = []
# self.pxrImageable.MakeInvisible()
# LOCMOD revisit
# self.generate_road(shape)
self.dr.randomize_once()
def generate_road(self, shape):
stage = self.omni_kit.get_stage()
self.add_track(stage)
def add_track(self, stage):
result, nucleus_server = self.find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
path = nucleus_server + "/Isaac/Environments/Jetracer/jetracer_track_solid.usd"
prefix = "/World/Env/Track"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
# self.prims.append(prim_path) #(don't add so the jetracer track won't be removed on reset)
track_prim = stage.DefinePrim(prim_path, "Xform")
track_prim.GetReferences().AddReference(path)
# xform = UsdGeom.Xformable(track_prim)
# xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
# mat = Gf.Matrix4d().SetTranslate(location)
# mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation))
# xform_op.Set(mat)
def setup_physics(self):
from pxr import PhysxSchema, PhysicsSchemaTools
stage = self.omni_kit.get_stage()
# Add physics scene
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/World/Env/PhysicsScene"))
# Set gravity vector
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/World/Env/PhysicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/World/Env/PhysicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Create physics plane for the ground
PhysicsSchemaTools.addGroundPlane(
stage, "/World/Env/GroundPlane", "Z", 100.0, Gf.Vec3f(0, 0, self.height), Gf.Vec3f(1.0)
)
# Hide the visual geometry
imageable = UsdGeom.Imageable(stage.GetPrimAtPath("/World/Env/GroundPlane/geom"))
if imageable:
imageable.MakeInvisible()
def get_valid_location(self):
# keep sampling random points until one lies within LANE_WIDTH of the track's center line
dist = 1
x = 4
y = 4
while dist > LANE_WIDTH:
x = random.randint(0, TRACK_DIMS[0])
y = random.randint(0, TRACK_DIMS[1])
dist = center_line_dist(np.array([x, y]))
print("get valid location called", x, y)
return (x, y)
| 8,502 | Python | 41.515 | 164 | 0.620325 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/online_generation/segmentation/dataset.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Dataset with online randomized scene generation for Instance Segmentation training.
Use OmniKit to generate a simple scene. At each iteration, the scene is populated by
adding assets from the user-specified classes with randomized pose and colour.
The camera position is also randomized before capturing groundtruth consisting of
an RGB rendered image, Tight 2D Bounding Boxes and Instance Segmentation masks.
"""
import os
import glob
import torch
import random
import numpy as np
import signal
import omni
from omni.isaac.python_app import OmniKitHelper
# to work around torch's SSL issue
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Setup default generation variables
# Value are (min, max) ranges
RANDOM_TRANSLATION_X = (-30.0, 30.0)
RANDOM_TRANSLATION_Z = (-30.0, 30.0)
RANDOM_ROTATION_Y = (0.0, 360.0)
SCALE = 20
CAMERA_DISTANCE = 300
BBOX_AREA_THRESH = 16
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "PathTracing",
"samples_per_pixel_per_frame": 12,
"headless": False,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
class RandomObjects(torch.utils.data.IterableDataset):
"""Dataset of random ShapeNet objects.
Objects are randomly chosen from selected categories and are positioned, rotated and coloured
randomly in an empty room. RGB, BoundingBox2DTight and Instance Segmentation are captured by moving a
camera aimed at the centre of the scene which is positioned at random at a fixed distance from the centre.
This dataset is intended for use with ShapeNet but will function with any dataset of USD models
structured as `root/category/**/*.usd`. One note is that this is designed for assets without materials
attached. This is to avoid requiring to compile MDLs and load textures while training.
Args:
categories (tuple of str): Tuple or list of categories. For ShapeNet, these will be the synset IDs.
max_asset_size (int): Maximum asset file size that will be loaded. This prevents out of memory errors
due to loading large meshes.
num_assets_min (int): Minimum number of assets populated in the scene.
num_assets_max (int): Maximum number of assets populated in the scene.
split (float): Fraction of the USDs found to use for training.
train (bool): If true, use the first training split and generate infinite random scenes.
"""
def __init__(
self, root, categories, max_asset_size=None, num_assets_min=3, num_assets_max=5, split=0.7, train=True
):
assert len(categories) > 1
assert (split > 0) and (split <= 1.0)
self.kit = OmniKitHelper(config=RENDER_CONFIG)
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.synthetic_utils import shapenet
self.sd_helper = SyntheticDataHelper()
self.stage = self.kit.get_stage()
# If ShapeNet categories are specified with their names, convert to synset ID
# Remove this if using with a different dataset than ShapeNet
category_ids = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in categories]
self.categories = category_ids
self.range_num_assets = (num_assets_min, max(num_assets_min, num_assets_max))
self.references = self._find_usd_assets(root, category_ids, max_asset_size, split, train)
self._setup_world()
self.cur_idx = 0
self.exiting = False
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
def _setup_world(self):
from pxr import UsdGeom
"""Setup lights, walls, floor, ceiling and camera"""
        # In a practical setting, the room parameters should attempt to match those of the
        # target domain. Here, we instead choose simple values.
self.kit.create_prim(
"/World/Room", "Sphere", attributes={"radius": 1e3, "primvars:displayColor": [(1.0, 1.0, 1.0)]}
)
self.kit.create_prim(
"/World/Ground",
"Cylinder",
translation=(0.0, -0.5, 0.0),
rotation=(90.0, 0.0, 0.0),
attributes={"height": 1, "radius": 1e4, "primvars:displayColor": [(1.0, 1.0, 1.0)]},
)
self.kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(-450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (0.0, 0.365, 0.848)},
)
self.kit.create_prim(
"/World/Light2",
"SphereLight",
translation=(450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (1.0, 0.278, 0.0)},
)
self.kit.create_prim("/World/Asset", "Xform")
self.camera_rig = UsdGeom.Xformable(self.kit.create_prim("/World/CameraRig", "Xform"))
self.camera = self.kit.create_prim("/World/CameraRig/Camera", "Camera", translation=(0.0, 0.0, CAMERA_DISTANCE))
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera.GetPath()))
self.viewport = omni.kit.viewport.get_default_viewport_window()
self.kit.update()
def _find_usd_assets(self, root, categories, max_asset_size, split, train=True):
"""Look for USD files under root/category for each category specified.
For each category, generate a list of all USD files found and select
        the first `split * num_assets` assets if `train=True`, otherwise select the
        remainder.
"""
references = {}
for category in categories:
all_assets = glob.glob(os.path.join(root, category, "**/*.usd"), recursive=True)
            # Filter out large files to avoid out-of-memory errors during training
if max_asset_size is None:
assets_filtered = all_assets
else:
assets_filtered = []
for a in all_assets:
if os.stat(a).st_size > max_asset_size * 1e6:
print(f"{a} skipped as it exceeded the max size {max_asset_size} MB.")
else:
assets_filtered.append(a)
num_assets = len(assets_filtered)
if num_assets == 0:
raise ValueError(f"No USDs found for category {category} under max size {max_asset_size} MB.")
if train:
references[category] = assets_filtered[: int(num_assets * split)]
else:
references[category] = assets_filtered[int(num_assets * split) :]
return references
def _add_preview_surface(self, prim, diffuse, roughness, metallic):
from pxr import UsdShade, Sdf
"""Add a preview surface material using the metallic workflow."""
path = f"{prim.GetPath()}/mat"
material = UsdShade.Material.Define(self.stage, path)
pbrShader = UsdShade.Shader.Define(self.stage, f"{path}/shader")
pbrShader.CreateIdAttr("UsdPreviewSurface")
pbrShader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Float3).Set(diffuse)
pbrShader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(roughness)
pbrShader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(metallic)
material.CreateSurfaceOutput().ConnectToSource(pbrShader, "surface")
UsdShade.MaterialBindingAPI(prim).Bind(material)
def load_single_asset(self, ref, semantic_label, suffix=""):
from pxr import UsdGeom
"""Load a USD asset with random pose.
args
ref (str): Path to the USD that this prim will reference.
semantic_label (str): Semantic label.
suffix (str): String to add to the end of the prim's path.
"""
x = random.uniform(*RANDOM_TRANSLATION_X)
z = random.uniform(*RANDOM_TRANSLATION_Z)
rot_y = random.uniform(*RANDOM_ROTATION_Y)
asset = self.kit.create_prim(
f"/World/Asset/mesh{suffix}",
"Xform",
scale=(SCALE, SCALE, SCALE),
rotation=(0.0, rot_y, 0.0),
ref=ref,
semantic_label=semantic_label,
)
bound = UsdGeom.Mesh(asset).ComputeWorldBound(0.0, "default")
box_min_y = bound.GetBox().GetMin()[1]
UsdGeom.XformCommonAPI(asset).SetTranslate((x, -box_min_y, z))
return asset
def populate_scene(self):
"""Clear the scene and populate it with assets."""
self.stage.RemovePrim("/World/Asset")
self.assets = []
num_assets = random.randint(*self.range_num_assets)
for i in range(num_assets):
category = random.choice(list(self.references.keys()))
ref = random.choice(self.references[category])
self.assets.append(self.load_single_asset(ref, category, i))
def randomize_asset_material(self):
"""Ranomize asset material properties"""
for asset in self.assets:
colour = (random.random(), random.random(), random.random())
# Here we choose not to have materials unrealistically rough or reflective.
roughness = random.uniform(0.1, 0.9)
            # Bias towards non-metallic objects (80% dielectric, 20% metallic).
metallic = random.choices([0.0, 1.0], weights=(0.8, 0.2))[0]
self._add_preview_surface(asset, colour, roughness, metallic)
def randomize_camera(self):
"""Randomize the camera position."""
        # By simply rotating a camera "rig" instead of repositioning the camera
        # itself, we greatly simplify our job.
# Clear previous transforms
self.camera_rig.ClearXformOpOrder()
# Change azimuth angle
self.camera_rig.AddRotateYOp().Set(random.random() * 360)
# Change elevation angle
self.camera_rig.AddRotateXOp().Set(random.random() * -90)
def __iter__(self):
return self
def __next__(self):
# Generate a new scene
self.populate_scene()
self.randomize_camera()
self.randomize_asset_material()
# step once and then wait for materials to load
self.kit.update()
print("waiting for materials to load...")
while self.kit.is_loading():
self.kit.update()
print("done")
self.kit.update()
# Collect Groundtruth
gt = self.sd_helper.get_groundtruth(["rgb", "boundingBox2DTight", "instanceSegmentation"], self.viewport)
# RGB
# Drop alpha channel
image = gt["rgb"][..., :3]
# Cast to tensor if numpy array
if isinstance(gt["rgb"], np.ndarray):
image = torch.tensor(image, dtype=torch.float, device="cuda")
# Normalize between 0. and 1. and change order to channel-first.
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# Bounding Box
gt_bbox = gt["boundingBox2DTight"]
# Create mapping from categories to index
mapping = {cat: i + 1 for i, cat in enumerate(self.categories)}
bboxes = torch.tensor(gt_bbox[["x_min", "y_min", "x_max", "y_max"]].tolist())
# For each bounding box, map semantic label to label index
labels = torch.LongTensor([mapping[bb["semanticLabel"]] for bb in gt_bbox])
        # Calculate the area of each bounding box
areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
        # Identify invalid bounding boxes to filter final output
valid_areas = (areas > 0.0) * (areas < (image.shape[1] * image.shape[2]))
# Instance Segmentation
instance_data, instance_mappings = gt["instanceSegmentation"][0], gt["instanceSegmentation"][1]
instance_list = [im[0] for im in gt_bbox]
        masks = np.zeros((len(instance_list), *instance_data.shape), dtype=bool)
for i, instances in enumerate(instance_list):
masks[i] = np.isin(instance_data, instances)
if isinstance(masks, np.ndarray):
masks = torch.tensor(masks, device="cuda")
target = {
"boxes": bboxes[valid_areas],
"labels": labels[valid_areas],
"masks": masks[valid_areas],
"image_id": torch.LongTensor([self.cur_idx]),
"area": areas[valid_areas],
"iscrowd": torch.BoolTensor([False] * len(bboxes[valid_areas])), # Assume no crowds
}
self.cur_idx += 1
return image, target
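# A minimal consumption sketch (not part of the original sample): RandomObjects yields
# (image, target) pairs indefinitely, so it can be wrapped in a standard torch DataLoader,
# as train.py does. The root path and category names below are placeholders and assume
# ShapeNet assets that have already been converted to USD.
def _example_dataloader_sketch(root="/path/to/ShapeNetCore.v2_nomat", categories=("table", "chair")):
    from torch.utils.data import DataLoader
    dataset = RandomObjects(root, categories, max_asset_size=10.0)
    # Targets are variable-sized dicts, so zip the batch into tuples instead of stacking.
    loader = DataLoader(dataset, batch_size=2, collate_fn=lambda batch: tuple(zip(*batch)))
    images, targets = next(iter(loader))
    print(len(images), targets[0]["boxes"].shape)
    dataset.kit.shutdown()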
if __name__ == "__main__":
"Typical usage"
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument("--categories", type=str, nargs="+", required=True, help="List of object classes to use")
parser.add_argument(
"--max-asset-size",
type=float,
default=10.0,
help="Maximum asset size to use in MB. Larger assets will be skipped.",
)
parser.add_argument(
"--root",
type=str,
default=None,
help="Root directory containing USDs. If not specified, use {SHAPENET_LOCAL_DIR}_nomat as root.",
)
args = parser.parse_args()
# If root is not specified use the environment variable SHAPENET_LOCAL_DIR with the _nomat suffix as root
if args.root is None:
args.root = f"{os.path.abspath(os.environ['SHAPENET_LOCAL_DIR'])}_nomat"
dataset = RandomObjects(args.root, args.categories, max_asset_size=args.max_asset_size)
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.synthetic_utils import shapenet
categories = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in args.categories]
# Iterate through dataset and visualize the output
plt.ion()
_, axes = plt.subplots(1, 2, figsize=(10, 5))
plt.tight_layout()
for image, target in dataset:
for ax in axes:
ax.clear()
ax.axis("off")
np_image = image.permute(1, 2, 0).cpu().numpy()
axes[0].imshow(np_image)
num_instances = len(target["boxes"])
colours = vis.random_colours(num_instances, enable_random=False)
overlay = np.zeros_like(np_image)
for mask, colour in zip(target["masks"].cpu().numpy(), colours):
overlay[mask, :3] = colour
axes[1].imshow(overlay)
mapping = {i + 1: cat for i, cat in enumerate(categories)}
labels = [shapenet.SYNSET_TO_LABEL[mapping[label.item()]] for label in target["labels"]]
vis.plot_boxes(ax, target["boxes"].tolist(), labels=labels, colours=colours)
plt.draw()
plt.savefig("dataset.png")
if dataset.exiting:
break
# cleanup
dataset.kit.shutdown()
| 15,242 | Python | 40.308943 | 120 | 0.63056 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/online_generation/segmentation/train.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Instance Segmentation Training Demonstration
Use a PyTorch dataloader together with OmniKit to generate scenes and groundtruth to
train a [Mask-RCNN](https://arxiv.org/abs/1703.06870) model.
"""
import os
import torch
from torch.utils.data import DataLoader
import torchvision
import matplotlib.pyplot as plt
import numpy as np
import signal
from dataset import RandomObjects
def main(args):
device = "cuda"
# Setup data
train_set = RandomObjects(
args.root, args.categories, num_assets_min=3, num_assets_max=5, max_asset_size=args.max_asset_size
)
train_loader = DataLoader(train_set, batch_size=2, collate_fn=lambda x: tuple(zip(*x)))
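    # The lambda collate keeps the variable-sized detection targets intact: a batch such as
    # [(img_a, tgt_a), (img_b, tgt_b)] becomes ((img_a, img_b), (tgt_a, tgt_b)), i.e. the
    # (images, targets) pair unpacked in the training loop below.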
def handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
train_set.exiting = True
signal.signal(signal.SIGINT, handle_exit)
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.synthetic_utils import shapenet
# Setup Model
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, num_classes=1 + len(args.categories))
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
if args.visualize:
plt.ion()
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
for i, train_batch in enumerate(train_loader):
if i > args.max_iters or train_set.exiting:
print("Exiting ...")
train_set.kit.shutdown()
break
model.train()
images, targets = train_batch
images = [i.to(device) for i in images]
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
loss = sum(loss for loss in loss_dict.values())
print(f"ITER {i} | {loss:.6f}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
model.eval()
with torch.no_grad():
predictions = model(images[:1])
if args.visualize:
idx = 0
score_thresh = 0.5
mask_thresh = 0.5
pred = predictions[idx]
np_image = images[idx].permute(1, 2, 0).cpu().numpy()
for ax in axes:
fig.suptitle(f"Iteration {i:05}", fontsize=14)
ax.cla()
ax.axis("off")
ax.imshow(np_image)
axes[0].set_title("Input")
axes[1].set_title("Input + Predictions")
score_filter = [i for i in range(len(pred["scores"])) if pred["scores"][i] > score_thresh]
num_instances = len(score_filter)
colours = vis.random_colours(num_instances, enable_random=False)
overlay = np.zeros_like(np_image)
for mask, colour in zip(pred["masks"], colours):
overlay[mask.squeeze().cpu().numpy() > mask_thresh, :3] = colour
axes[1].imshow(overlay, alpha=0.5)
# If ShapeNet categories are specified with their names, convert to synset ID
# Remove this if using with a different dataset than ShapeNet
args.categories = [shapenet.LABEL_TO_SYNSET.get(c, c) for c in args.categories]
mapping = {i + 1: cat for i, cat in enumerate(args.categories)}
labels = [shapenet.SYNSET_TO_LABEL[mapping[label.item()]] for label in pred["labels"]]
vis.plot_boxes(axes[1], pred["boxes"], labels=labels, colours=colours)
plt.draw()
plt.savefig("train.png")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument(
"--root",
type=str,
default=None,
help="Root directory containing ShapeNet USDs. If not specified, use {SHAPENET_LOCAL_DIR}_nomat as root.",
)
parser.add_argument(
"--categories", type=str, nargs="+", required=True, help="List of ShapeNet categories to use (space seperated)."
)
parser.add_argument(
"--max-asset-size",
type=float,
default=10.0,
help="Maximum asset size to use in MB. Larger assets will be skipped.",
)
parser.add_argument("-lr", "--learning_rate", type=float, default=1e-4, help="Learning rate")
parser.add_argument("--max-iters", type=float, default=1000, help="Number of training iterations.")
parser.add_argument("--visualize", action="store_true", help="Visualize predicted masks during training.")
args = parser.parse_args()
# If root is not specified use the environment variable SHAPENET_LOCAL_DIR with the _nomat suffix as root
if args.root is None:
args.root = f"{os.path.abspath(os.environ['SHAPENET_LOCAL_DIR'])}_nomat"
main(args)
| 5,320 | Python | 35.951389 | 120 | 0.617105 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/basic/visualize_groundtruth.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Demonstration of using OmniKit to generate a scene, collect groundtruth and visualize
the results.
"""
import copy
import os
import omni
import random
import numpy as np
from omni.isaac.python_app import OmniKitHelper
import matplotlib.pyplot as plt
TRANSLATION_RANGE = 300.0
SCALE = 50.0
def main():
kit = OmniKitHelper(
{"renderer": "RayTracedLighting", "experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit'}
)
from pxr import UsdGeom, Semantics
from omni.isaac.synthetic_utils import SyntheticDataHelper
sd_helper = SyntheticDataHelper()
from omni.syntheticdata import visualize, helpers
# SCENE SETUP
# Get the current stage
stage = kit.get_stage()
# Add a distant light
stage.DefinePrim("/World/Light", "DistantLight")
    # Create 10 randomly positioned and coloured spheres and cubes
# We will assign each a semantic label based on their shape (sphere/cube)
for i in range(10):
prim_type = random.choice(["Cube", "Sphere"])
prim = stage.DefinePrim(f"/World/cube{i}", prim_type)
translation = np.random.rand(3) * TRANSLATION_RANGE
UsdGeom.XformCommonAPI(prim).SetTranslate(translation.tolist())
UsdGeom.XformCommonAPI(prim).SetScale((SCALE, SCALE, SCALE))
prim.GetAttribute("primvars:displayColor").Set([np.random.rand(3).tolist()])
# Add semantic label based on prim type
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(prim_type)
# Get groundtruth
kit.update()
viewport = omni.kit.viewport.get_default_viewport_window()
gt = sd_helper.get_groundtruth(
[
"rgb",
"depth",
"boundingBox2DTight",
"boundingBox2DLoose",
"instanceSegmentation",
"semanticSegmentation",
"boundingBox3D",
],
viewport,
)
# GROUNDTRUTH VISUALIZATION
# Setup a figure
_, axes = plt.subplots(2, 4, figsize=(20, 7))
axes = axes.flat
for ax in axes:
ax.axis("off")
# RGB
axes[0].set_title("RGB")
for ax in axes[:-1]:
ax.imshow(gt["rgb"])
# DEPTH
axes[1].set_title("Depth")
depth_data = np.clip(gt["depth"], 0, 255)
axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))
# BBOX2D TIGHT
axes[2].set_title("BBox 2D Tight")
rgb_data = copy.deepcopy(gt["rgb"])
axes[2].imshow(visualize.colorize_bboxes(gt["boundingBox2DTight"], rgb_data))
# BBOX2D LOOSE
axes[3].set_title("BBox 2D Loose")
rgb_data = copy.deepcopy(gt["rgb"])
axes[3].imshow(visualize.colorize_bboxes(gt["boundingBox2DLoose"], rgb_data))
# INSTANCE SEGMENTATION
axes[4].set_title("Instance Segmentation")
instance_seg = gt["instanceSegmentation"][0]
instance_rgb = visualize.colorize_segmentation(instance_seg)
axes[4].imshow(instance_rgb, alpha=0.7)
# SEMANTIC SEGMENTATION
axes[5].set_title("Semantic Segmentation")
semantic_seg = gt["semanticSegmentation"]
semantic_rgb = visualize.colorize_segmentation(semantic_seg)
axes[5].imshow(semantic_rgb, alpha=0.7)
# BBOX 3D
axes[6].set_title("BBox 3D")
bbox_3d_data = gt["boundingBox3D"]
bboxes_3d_corners = bbox_3d_data["corners"]
projected_corners = helpers.world_to_image(bboxes_3d_corners.reshape(-1, 3), viewport)
projected_corners = projected_corners.reshape(-1, 8, 3)
rgb_data = copy.deepcopy(gt["rgb"])
bboxes3D_rgb = visualize.colorize_bboxes_3d(projected_corners, rgb_data)
axes[6].imshow(bboxes3D_rgb)
# Save figure
print("saving figure to: ", os.getcwd() + "/visualize_groundtruth.png")
plt.savefig("visualize_groundtruth.png")
# cleanup
kit.shutdown()
if __name__ == "__main__":
main()
| 4,370 | Python | 31.139706 | 110 | 0.670252 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/offline_generation/generator_stereo.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate offline synthetic dataset using two cameras
"""
import asyncio
import copy
import numpy as np
import os
import random
import torch
import signal
import carb
import omni
from omni.isaac.python_app import OmniKitHelper
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "RayTracedLighting",
"samples_per_pixel_per_frame": 12,
"headless": False,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
class RandomScenario(torch.utils.data.IterableDataset):
def __init__(self, scenario_path, max_queue_size):
self.kit = OmniKitHelper(config=RENDER_CONFIG)
from omni.isaac.synthetic_utils import SyntheticDataHelper, DataWriter, DomainRandomization
self.sd_helper = SyntheticDataHelper()
self.dr_helper = DomainRandomization()
self.writer_helper = DataWriter
self.dr_helper.toggle_manual_mode()
self.stage = self.kit.get_stage()
self.result = True
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
if scenario_path is None:
self.result, nucleus_server = find_nucleus_server()
if self.result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
scenario_path = self.asset_path + "/Samples/Synthetic_Data/Stage/warehouse_with_sensors.usd"
self.scenario_path = scenario_path
self.max_queue_size = max_queue_size
self.data_writer = None
self._setup_world(scenario_path)
self.cur_idx = 0
self.exiting = False
self._viewport = omni.kit.viewport.get_viewport_interface()
self._sensor_settings = {}
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
def add_stereo_setup(self):
from pxr import Gf, UsdGeom
stage = omni.usd.get_context().get_stage()
        # Create two cameras
center_point = Gf.Vec3d(0, 0, 200)
stereoPrimPath = "/World/Stereo"
leftCameraPrimPath = stereoPrimPath + "/LeftCamera"
rightCameraPrimPath = stereoPrimPath + "/RightCamera"
self.stereoPrim = stage.DefinePrim(stereoPrimPath, "Xform")
UsdGeom.XformCommonAPI(self.stereoPrim).SetTranslate(center_point)
leftCameraPrim = stage.DefinePrim(leftCameraPrimPath, "Camera")
UsdGeom.XformCommonAPI(leftCameraPrim).SetTranslate(Gf.Vec3d(0, -10, 0))
UsdGeom.XformCommonAPI(leftCameraPrim).SetRotate(Gf.Vec3f(90, 0, 90))
rightCameraPrim = stage.DefinePrim(rightCameraPrimPath, "Camera")
UsdGeom.XformCommonAPI(rightCameraPrim).SetTranslate(Gf.Vec3d(0, 10, 0))
UsdGeom.XformCommonAPI(rightCameraPrim).SetRotate(Gf.Vec3f(90, 0, 90))
# Need to set this before setting viewport window size
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/width", -1)
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/height", -1)
# Get existing viewport, set active camera as left camera
viewport_handle_1 = omni.kit.viewport.get_viewport_interface().get_instance("Viewport")
viewport_window_1 = omni.kit.viewport.get_viewport_interface().get_viewport_window(viewport_handle_1)
viewport_window_1.set_texture_resolution(1280, 720)
viewport_window_1.set_active_camera(leftCameraPrimPath)
# Create new viewport, set active camera as right camera
viewport_handle_2 = omni.kit.viewport.get_viewport_interface().create_instance()
viewport_window_2 = omni.kit.viewport.get_viewport_interface().get_viewport_window(viewport_handle_2)
viewport_window_2.set_active_camera("/World/Stereo/RightCamera")
viewport_window_2.set_texture_resolution(1280, 720)
viewport_window_2.set_window_pos(720, 0)
viewport_window_2.set_window_size(720, 890)
# Setup stereo camera movement randomization
radius = 100
target_points_list = []
for theta in range(200, 300):
th = theta * np.pi / 180
x = radius * np.cos(th) + center_point[0]
y = radius * np.sin(th) + center_point[1]
target_points_list.append(Gf.Vec3f(x, y, center_point[2]))
lookat_target_points_list = [a for a in target_points_list[1:]]
lookat_target_points_list.append(target_points_list[0])
result, prim = omni.kit.commands.execute(
"CreateTransformComponentCommand",
prim_paths=[stereoPrimPath],
target_points=target_points_list,
lookat_target_points=lookat_target_points_list,
enable_sequential_behavior=True,
)
async def load_stage(self, path):
await omni.usd.get_context().open_stage_async(path)
def _setup_world(self, scenario_path):
# Load scenario
setup_task = asyncio.ensure_future(self.load_stage(scenario_path))
while not setup_task.done():
self.kit.update()
self.add_stereo_setup()
self.kit.update()
self.kit.setup_renderer()
self.kit.update()
def __iter__(self):
return self
def __next__(self):
# step once and then wait for materials to load
self.dr_helper.randomize_once()
self.kit.update()
while self.kit.is_loading():
self.kit.update()
# Enable/disable sensor output and their format
sensor_settings_viewport_1 = {
"rgb": {"enabled": True},
"depth": {"enabled": True, "colorize": True, "npy": True},
"instance": {"enabled": True, "colorize": True, "npy": True},
"semantic": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True},
}
sensor_settings_viewport_2 = {
"rgb": {"enabled": True},
"depth": {"enabled": True, "colorize": True, "npy": True},
"instance": {"enabled": True, "colorize": True, "npy": True},
"semantic": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True},
"bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True},
}
viewports = self._viewport.get_instance_list()
self._viewport_names = [self._viewport.get_viewport_window_name(vp) for vp in viewports]
# Make sure two viewports are initialized
if len(self._viewport_names) != 2:
return
self._sensor_settings[self._viewport_names[0]] = copy.deepcopy(sensor_settings_viewport_1)
self._sensor_settings[self._viewport_names[1]] = copy.deepcopy(sensor_settings_viewport_2)
self._num_worker_threads = 4
self._output_folder = os.getcwd() + "/output"
# Write to disk
if self.data_writer is None:
self.data_writer = self.writer_helper(
self._output_folder, self._num_worker_threads, self.max_queue_size, self._sensor_settings
)
self.data_writer.start_threads()
image = None
for viewport_name in self._viewport_names:
groundtruth = {
"METADATA": {
"image_id": str(self.cur_idx),
"viewport_name": viewport_name,
"DEPTH": {},
"INSTANCE": {},
"SEMANTIC": {},
"BBOX2DTIGHT": {},
"BBOX2DLOOSE": {},
},
"DATA": {},
}
gt_list = []
if self._sensor_settings[viewport_name]["rgb"]["enabled"]:
gt_list.append("rgb")
if self._sensor_settings[viewport_name]["depth"]["enabled"]:
gt_list.append("depthLinear")
if self._sensor_settings[viewport_name]["bbox_2d_tight"]["enabled"]:
gt_list.append("boundingBox2DTight")
if self._sensor_settings[viewport_name]["bbox_2d_loose"]["enabled"]:
gt_list.append("boundingBox2DLoose")
if self._sensor_settings[viewport_name]["instance"]["enabled"]:
gt_list.append("instanceSegmentation")
if self._sensor_settings[viewport_name]["semantic"]["enabled"]:
gt_list.append("semanticSegmentation")
# Render new frame
self.kit.update()
# Collect Groundtruth
viewport = self._viewport.get_viewport_window(self._viewport.get_instance(viewport_name))
gt = self.sd_helper.get_groundtruth(gt_list, viewport)
# RGB
image = gt["rgb"]
if self._sensor_settings[viewport_name]["rgb"]["enabled"] and gt["state"]["rgb"]:
groundtruth["DATA"]["RGB"] = gt["rgb"]
# Depth
if self._sensor_settings[viewport_name]["depth"]["enabled"] and gt["state"]["depthLinear"]:
groundtruth["DATA"]["DEPTH"] = gt["depthLinear"].squeeze()
groundtruth["METADATA"]["DEPTH"]["COLORIZE"] = self._sensor_settings[viewport_name]["depth"]["colorize"]
groundtruth["METADATA"]["DEPTH"]["NPY"] = self._sensor_settings[viewport_name]["depth"]["npy"]
# Instance Segmentation
if self._sensor_settings[viewport_name]["instance"]["enabled"] and gt["state"]["instanceSegmentation"]:
instance_data = gt["instanceSegmentation"][0]
groundtruth["DATA"]["INSTANCE"] = instance_data
groundtruth["METADATA"]["INSTANCE"]["WIDTH"] = instance_data.shape[1]
groundtruth["METADATA"]["INSTANCE"]["HEIGHT"] = instance_data.shape[0]
groundtruth["METADATA"]["INSTANCE"]["COLORIZE"] = self._sensor_settings[viewport_name]["instance"][
"colorize"
]
groundtruth["METADATA"]["INSTANCE"]["NPY"] = self._sensor_settings[viewport_name]["instance"]["npy"]
# Semantic Segmentation
if self._sensor_settings[viewport_name]["semantic"]["enabled"] and gt["state"]["semanticSegmentation"]:
semantic_data = gt["semanticSegmentation"]
semantic_data[semantic_data == 65535] = 0 # deals with invalid semantic id
groundtruth["DATA"]["SEMANTIC"] = semantic_data
groundtruth["METADATA"]["SEMANTIC"]["WIDTH"] = semantic_data.shape[1]
groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"] = semantic_data.shape[0]
groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"] = self._sensor_settings[viewport_name]["semantic"][
"colorize"
]
groundtruth["METADATA"]["SEMANTIC"]["NPY"] = self._sensor_settings[viewport_name]["semantic"]["npy"]
# 2D Tight BBox
if self._sensor_settings[viewport_name]["bbox_2d_tight"]["enabled"] and gt["state"]["boundingBox2DTight"]:
groundtruth["DATA"]["BBOX2DTIGHT"] = gt["boundingBox2DTight"]
groundtruth["METADATA"]["BBOX2DTIGHT"]["COLORIZE"] = self._sensor_settings[viewport_name][
"bbox_2d_tight"
]["colorize"]
groundtruth["METADATA"]["BBOX2DTIGHT"]["NPY"] = self._sensor_settings[viewport_name]["bbox_2d_tight"][
"npy"
]
# 2D Loose BBox
if self._sensor_settings[viewport_name]["bbox_2d_loose"]["enabled"] and gt["state"]["boundingBox2DLoose"]:
groundtruth["DATA"]["BBOX2DLOOSE"] = gt["boundingBox2DLoose"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["COLORIZE"] = self._sensor_settings[viewport_name][
"bbox_2d_loose"
]["colorize"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["NPY"] = self._sensor_settings[viewport_name]["bbox_2d_loose"][
"npy"
]
self.data_writer.q.put(groundtruth)
self.cur_idx += 1
return image
if __name__ == "__main__":
"Typical usage"
import argparse
parser = argparse.ArgumentParser("Stereo dataset generator")
parser.add_argument("--scenario", type=str, help="Scenario to load from omniverse server")
parser.add_argument("--num_frames", type=int, default=30, help="Number of frames to record")
parser.add_argument("--max_queue_size", type=int, default=500, help="Max size of queue to store and process data")
args = parser.parse_args()
dataset = RandomScenario(args.scenario, args.max_queue_size)
if dataset.result:
# Iterate through dataset and visualize the output
print("Loading materials. Will generate data soon...")
for image in dataset:
print("ID: ", dataset.cur_idx)
if dataset.cur_idx == args.num_frames:
break
if dataset.exiting:
break
# cleanup
dataset.kit.shutdown()
| 13,669 | Python | 44.415282 | 120 | 0.603117 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/offline_generation/generator.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate offline synthetic dataset
"""
import asyncio
import copy
import os
import torch
import signal
import carb
import omni
from omni.isaac.python_app import OmniKitHelper
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "RayTracedLighting",
"samples_per_pixel_per_frame": 12,
"headless": True,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1024,
"height": 800,
}
class RandomScenario(torch.utils.data.IterableDataset):
def __init__(self, scenario_path, writer_mode, data_dir, max_queue_size, train_size, classes):
self.kit = OmniKitHelper(config=RENDER_CONFIG)
from omni.isaac.synthetic_utils import SyntheticDataHelper, DataWriter, KittiWriter, DomainRandomization
self.sd_helper = SyntheticDataHelper()
self.dr_helper = DomainRandomization()
self.writer_mode = writer_mode
self.writer_helper = KittiWriter if writer_mode == "kitti" else DataWriter
self.dr_helper.toggle_manual_mode()
self.stage = self.kit.get_stage()
self.result = True
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
if scenario_path is None:
self.result, nucleus_server = find_nucleus_server()
if self.result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
scenario_path = self.asset_path + "/Samples/Synthetic_Data/Stage/warehouse_with_sensors.usd"
self.scenario_path = scenario_path
self.max_queue_size = max_queue_size
self.data_writer = None
self.data_dir = data_dir
self.train_size = train_size
self.classes = classes
self._setup_world(scenario_path)
self.cur_idx = 0
self.exiting = False
self._sensor_settings = {}
signal.signal(signal.SIGINT, self._handle_exit)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
async def load_stage(self, path):
await omni.usd.get_context().open_stage_async(path)
def _setup_world(self, scenario_path):
# Load scenario
setup_task = asyncio.ensure_future(self.load_stage(scenario_path))
while not setup_task.done():
self.kit.update()
self.kit.setup_renderer()
self.kit.update()
def __iter__(self):
return self
def __next__(self):
# step once and then wait for materials to load
self.dr_helper.randomize_once()
self.kit.update()
while self.kit.is_loading():
self.kit.update()
# Enable/disable sensor output and their format
self._enable_rgb = True
self._enable_depth = True
self._enable_instance = True
self._enable_semantic = True
self._enable_bbox_2d_tight = True
self._enable_bbox_2d_loose = True
self._enable_depth_colorize = True
self._enable_instance_colorize = True
self._enable_semantic_colorize = True
self._enable_bbox_2d_tight_colorize = True
self._enable_bbox_2d_loose_colorize = True
self._enable_depth_npy = True
self._enable_instance_npy = True
self._enable_semantic_npy = True
self._enable_bbox_2d_tight_npy = True
self._enable_bbox_2d_loose_npy = True
self._num_worker_threads = 4
self._output_folder = self.data_dir
sensor_settings_viewport = {
"rgb": {"enabled": self._enable_rgb},
"depth": {
"enabled": self._enable_depth,
"colorize": self._enable_depth_colorize,
"npy": self._enable_depth_npy,
},
"instance": {
"enabled": self._enable_instance,
"colorize": self._enable_instance_colorize,
"npy": self._enable_instance_npy,
},
"semantic": {
"enabled": self._enable_semantic,
"colorize": self._enable_semantic_colorize,
"npy": self._enable_semantic_npy,
},
"bbox_2d_tight": {
"enabled": self._enable_bbox_2d_tight,
"colorize": self._enable_bbox_2d_tight_colorize,
"npy": self._enable_bbox_2d_tight_npy,
},
"bbox_2d_loose": {
"enabled": self._enable_bbox_2d_loose,
"colorize": self._enable_bbox_2d_loose_colorize,
"npy": self._enable_bbox_2d_loose_npy,
},
}
self._sensor_settings["Viewport"] = copy.deepcopy(sensor_settings_viewport)
# Write to disk
if self.data_writer is None:
print(f"Writing data to {self._output_folder}")
if self.writer_mode == "kitti":
self.data_writer = self.writer_helper(
self._output_folder, self._num_worker_threads, self.max_queue_size, self.train_size, self.classes
)
else:
self.data_writer = self.writer_helper(
self._output_folder, self._num_worker_threads, self.max_queue_size, self._sensor_settings
)
self.data_writer.start_threads()
viewport_iface = omni.kit.viewport.get_viewport_interface()
viewport_name = "Viewport"
viewport = viewport_iface.get_viewport_window(viewport_iface.get_instance(viewport_name))
groundtruth = {
"METADATA": {
"image_id": str(self.cur_idx),
"viewport_name": viewport_name,
"DEPTH": {},
"INSTANCE": {},
"SEMANTIC": {},
"BBOX2DTIGHT": {},
"BBOX2DLOOSE": {},
},
"DATA": {},
}
gt_list = []
if self._enable_rgb:
gt_list.append("rgb")
if self._enable_depth:
gt_list.append("depthLinear")
if self._enable_bbox_2d_tight:
gt_list.append("boundingBox2DTight")
if self._enable_bbox_2d_loose:
gt_list.append("boundingBox2DLoose")
if self._enable_instance:
gt_list.append("instanceSegmentation")
if self._enable_semantic:
gt_list.append("semanticSegmentation")
# Render new frame
self.kit.update()
# Collect Groundtruth
gt = self.sd_helper.get_groundtruth(gt_list, viewport)
# RGB
image = gt["rgb"]
if self._enable_rgb:
groundtruth["DATA"]["RGB"] = gt["rgb"]
# Depth
if self._enable_depth:
groundtruth["DATA"]["DEPTH"] = gt["depthLinear"].squeeze()
groundtruth["METADATA"]["DEPTH"]["COLORIZE"] = self._enable_depth_colorize
groundtruth["METADATA"]["DEPTH"]["NPY"] = self._enable_depth_npy
# Instance Segmentation
if self._enable_instance:
instance_data = gt["instanceSegmentation"][0]
instance_data_shape = instance_data.shape
groundtruth["DATA"]["INSTANCE"] = instance_data
groundtruth["METADATA"]["INSTANCE"]["WIDTH"] = instance_data_shape[1]
groundtruth["METADATA"]["INSTANCE"]["HEIGHT"] = instance_data_shape[0]
groundtruth["METADATA"]["INSTANCE"]["COLORIZE"] = self._enable_instance_colorize
groundtruth["METADATA"]["INSTANCE"]["NPY"] = self._enable_instance_npy
# Semantic Segmentation
if self._enable_semantic:
semantic_data = gt["semanticSegmentation"]
semantic_data_shape = semantic_data.shape
groundtruth["DATA"]["SEMANTIC"] = semantic_data
groundtruth["METADATA"]["SEMANTIC"]["WIDTH"] = semantic_data_shape[1]
groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"] = semantic_data_shape[0]
groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"] = self._enable_semantic_colorize
groundtruth["METADATA"]["SEMANTIC"]["NPY"] = self._enable_semantic_npy
# 2D Tight BBox
if self._enable_bbox_2d_tight:
groundtruth["DATA"]["BBOX2DTIGHT"] = gt["boundingBox2DTight"]
groundtruth["METADATA"]["BBOX2DTIGHT"]["COLORIZE"] = self._enable_bbox_2d_tight_colorize
groundtruth["METADATA"]["BBOX2DTIGHT"]["NPY"] = self._enable_bbox_2d_tight_npy
# 2D Loose BBox
if self._enable_bbox_2d_loose:
groundtruth["DATA"]["BBOX2DLOOSE"] = gt["boundingBox2DLoose"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["COLORIZE"] = self._enable_bbox_2d_loose_colorize
groundtruth["METADATA"]["BBOX2DLOOSE"]["NPY"] = self._enable_bbox_2d_loose_npy
groundtruth["METADATA"]["BBOX2DLOOSE"]["WIDTH"] = RENDER_CONFIG["width"]
groundtruth["METADATA"]["BBOX2DLOOSE"]["HEIGHT"] = RENDER_CONFIG["height"]
self.data_writer.q.put(groundtruth)
self.cur_idx += 1
return image
if __name__ == "__main__":
"Typical usage"
import argparse
parser = argparse.ArgumentParser("Dataset generator")
parser.add_argument("--scenario", type=str, help="Scenario to load from omniverse server")
parser.add_argument("--num_frames", type=int, default=10, help="Number of frames to record")
parser.add_argument("--writer_mode", type=str, default="npy", help="Specify output format - npy or kitti")
parser.add_argument(
"--data_dir", type=str, default=os.getcwd() + "/output", help="Location where data will be output"
)
parser.add_argument("--max_queue_size", type=int, default=500, help="Max size of queue to store and process data")
parser.add_argument(
"--train_size", type=int, default=8, help="Number of frames for training set, works when writer_mode is kitti"
)
parser.add_argument(
"--classes",
type=str,
nargs="+",
default=[],
help="Which classes to write labels for, works when writer_mode is kitti. Defaults to all classes",
)
args = parser.parse_args()
dataset = RandomScenario(
args.scenario, args.writer_mode, args.data_dir, args.max_queue_size, args.train_size, args.classes
)
if dataset.result:
# Iterate through dataset and visualize the output
print("Loading materials. Will generate data soon...")
for image in dataset:
print("ID: ", dataset.cur_idx)
if dataset.cur_idx == args.num_frames:
break
if dataset.exiting:
break
# cleanup
dataset.kit.shutdown()
| 11,119 | Python | 37.881119 | 118 | 0.595377 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/advanced/shapenet_usd_convertor.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import pprint
from omni.isaac.python_app import OmniKitHelper
"""Convert ShapeNetCore V2 to USD without materials.
By only converting the ShapeNet geometry, we can more quickly load assets into scenes for the purpose of creating
large datasets or for online training of Deep Learning models.
"""
if __name__ == "__main__":
RENDER_CONFIG = {"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit'}
kit = OmniKitHelper(config=RENDER_CONFIG)
import argparse
from omni.isaac.synthetic_utils import shapenet
parser = argparse.ArgumentParser("Convert ShapeNet assets to USD")
parser.add_argument(
"--categories",
type=str,
nargs="+",
default=None,
help="List of ShapeNet categories to convert (space seperated).",
)
parser.add_argument(
"--max-models", type=int, default=None, help="If specified, convert up to `max-models` per category."
)
parser.add_argument(
"--load-materials", action="store_true", help="If specified, materials will be loaded from shapenet meshes"
)
args = parser.parse_args()
# Ensure Omniverse Kit is launched via OmniKitHelper before shapenet_convert() is called
shapenet.shapenet_convert(args.categories, args.max_models, args.load_materials)
# cleanup
kit.shutdown()
| 1,763 | Python | 37.347825 | 115 | 0.723199 |
KazWong/omniverse_sample/ov_sample/python_samples/syntheticdata/advanced/visualize_groundtruth_physics.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Demonstration of using OmniKit to generate a scene, collect groundtruth and visualize
the results. This advanced sample also simulates physics and uses a custom glass material
"""
import copy
import os
import random
import numpy as np
from omni.isaac.python_app import OmniKitHelper
import matplotlib.pyplot as plt
TRANSLATION_RANGE = 300.0
SCALE = 50.0
# specify a custom config
CUSTOM_CONFIG = {
"width": 1024,
"height": 1024,
"renderer": "PathTracing",
"samples_per_pixel_per_frame": 128,
"max_bounces": 10,
"max_specular_transmission_bounces": 6,
"max_volume_bounces": 4,
"subdiv_refinement_level": 2,
"headless": True,
"sync_loads": True,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
def main():
kit = OmniKitHelper(CUSTOM_CONFIG)
from pxr import Gf, Sdf, UsdShade, UsdGeom, Semantics
from omni.isaac.synthetic_utils import SyntheticDataHelper
sd_helper = SyntheticDataHelper()
from omni.syntheticdata import visualize, helpers
from omni.physx.scripts import utils
from pxr import UsdPhysics, PhysxSchema, PhysicsSchemaTools
import omni
# SCENE SETUP
# Get the current stage
stage = kit.get_stage()
# Add a sphere light
kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(0, 200, 0),
attributes={"radius": 100, "intensity": 100000.0, "color": (1, 1, 1)},
)
# Add physics scene
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/World/physicsScene"))
# Set gravity vector
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/World/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/World/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Create a ground plane
PhysicsSchemaTools.addGroundPlane(stage, "/World/groundPlane", "Z", 1000, Gf.Vec3f(0, 0, -100), Gf.Vec3f(1.0))
    # Create 10 randomly positioned and coloured spheres, cubes and cylinders
    # We will assign each a semantic label based on their shape (sphere/cube/cylinder)
prims = []
for i in range(10):
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = stage.DefinePrim(f"/World/cube{i}", prim_type)
translation = np.random.rand(3) * TRANSLATION_RANGE
UsdGeom.XformCommonAPI(prim).SetTranslate(translation.tolist())
UsdGeom.XformCommonAPI(prim).SetScale((SCALE, SCALE, SCALE))
# prim.GetAttribute("primvars:displayColor").Set([np.random.rand(3).tolist()])
# Add semantic label based on prim type
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(prim_type)
# Add physics to prims
utils.setRigidBody(prim, "convexHull", False)
# Set Mass to 1 kg
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_api.CreateMassAttr(1)
# add prim reference to list
prims.append(prim)
# Apply glass material
for prim in prims:
# Create Glass material
mtl_created_list = []
kit.execute(
"CreateAndBindMdlMaterialFromLibrary",
mdl_name="OmniGlass.mdl",
mtl_name="OmniGlass",
mtl_created_list=mtl_created_list,
)
mtl_prim = stage.GetPrimAtPath(mtl_created_list[0])
# Set material inputs, these can be determined by looking at the .mdl file
# or by selecting the Shader attached to the Material in the stage window and looking at the details panel
color = Gf.Vec3f(random.random(), random.random(), random.random())
omni.usd.create_material_input(mtl_prim, "glass_color", color, Sdf.ValueTypeNames.Color3f)
omni.usd.create_material_input(mtl_prim, "glass_ior", 1.25, Sdf.ValueTypeNames.Float)
# This value is the volumetric light absorption scale, reduce to zero to make glass clearer
omni.usd.create_material_input(mtl_prim, "depth", 0.001, Sdf.ValueTypeNames.Float)
# Enable for thin glass objects if needed
omni.usd.create_material_input(mtl_prim, "thin_walled", False, Sdf.ValueTypeNames.Bool)
# Bind the material to the prim
prim_mat_shade = UsdShade.Material(mtl_prim)
UsdShade.MaterialBindingAPI(prim).Bind(prim_mat_shade, UsdShade.Tokens.strongerThanDescendants)
# force RayTracedLighting mode for better performance while simulating physics
kit.set_setting("/rtx/rendermode", "RayTracedLighting")
# start simulation
kit.play()
# Step simulation so that objects fall to rest
# wait until all materials are loaded
frame = 0
print("simulating physics...")
while frame < 60 or kit.is_loading():
kit.update(1 / 60.0)
frame = frame + 1
print("done")
# Return to user specified render mode
kit.set_setting("/rtx/rendermode", CUSTOM_CONFIG["renderer"])
print("capturing...")
# Get groundtruth using glass material
kit.update()
viewport = omni.kit.viewport.get_default_viewport_window()
gt = sd_helper.get_groundtruth(
[
"rgb",
"depth",
"boundingBox2DTight",
"boundingBox2DLoose",
"instanceSegmentation",
"semanticSegmentation",
"boundingBox3D",
],
viewport,
)
print("done")
# everything is captured, stop simulating
kit.stop()
print("visualize results")
# GROUNDTRUTH VISUALIZATION
# Setup a figure
_, axes = plt.subplots(2, 4, figsize=(20, 7))
axes = axes.flat
for ax in axes:
ax.axis("off")
# RGB
axes[0].set_title("RGB")
for ax in axes[:-1]:
ax.imshow(gt["rgb"])
# DEPTH
axes[1].set_title("Depth")
depth_data = np.clip(gt["depth"], 0, 255)
axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))
# BBOX2D TIGHT
axes[2].set_title("BBox 2D Tight")
rgb_data = copy.deepcopy(gt["rgb"])
axes[2].imshow(visualize.colorize_bboxes(gt["boundingBox2DTight"], rgb_data))
# BBOX2D LOOSE
axes[3].set_title("BBox 2D Loose")
rgb_data = copy.deepcopy(gt["rgb"])
axes[3].imshow(visualize.colorize_bboxes(gt["boundingBox2DLoose"], rgb_data))
# INSTANCE SEGMENTATION
axes[4].set_title("Instance Segmentation")
instance_seg = gt["instanceSegmentation"][0]
instance_rgb = visualize.colorize_segmentation(instance_seg)
axes[4].imshow(instance_rgb, alpha=0.7)
# SEMANTIC SEGMENTATION
axes[5].set_title("Semantic Segmentation")
semantic_seg = gt["semanticSegmentation"]
semantic_rgb = visualize.colorize_segmentation(semantic_seg)
axes[5].imshow(semantic_rgb, alpha=0.7)
# BBOX 3D
axes[6].set_title("BBox 3D")
bbox_3d_data = gt["boundingBox3D"]
bboxes_3d_corners = bbox_3d_data["corners"]
projected_corners = helpers.world_to_image(bboxes_3d_corners.reshape(-1, 3), viewport)
projected_corners = projected_corners.reshape(-1, 8, 3)
rgb_data = copy.deepcopy(gt["rgb"])
bboxes3D_rgb = visualize.colorize_bboxes_3d(projected_corners, rgb_data)
axes[6].imshow(bboxes3D_rgb)
# Save figure
print("saving figure to: ", os.getcwd() + "/visualize_groundtruth_physics.png")
plt.savefig("visualize_groundtruth_physics.png")
# cleanup
kit.shutdown()
if __name__ == "__main__":
main()
| 8,243 | Python | 35.31718 | 114 | 0.672449 |
KazWong/omniverse_sample/ov_sample/python_samples/isaac_sdk/load_stage.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
import carb
import omni
# This sample loads a usd stage and creates a robot engine bridge application and starts simulation
# Displays the average FPS of the simulation over a given amount of time
# Useful for testing an Isaac SDK sample scene using python
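# Example invocations (assumed launcher; --usd_path is appended to the /Isaac folder found
# on the nucleus server):
#   python load_stage.py --usd_path /Environments/Simple_Room/simple_room.usd
#   python load_stage.py --usd_path /Environments/Simple_Room/simple_room.usd --headless --benchmark --benchmark_timeout 120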
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"sync_loads": True,
"headless": False,
"renderer": "RayTracedLighting",
}
class UsdLoadSample:
def __init__(self, args):
CONFIG["headless"] = args.headless
self.kit = OmniKitHelper(config=CONFIG)
self.usd_path = ""
self._viewport = omni.kit.viewport.get_viewport_interface()
def start(self):
self.kit.play()
def stop(self):
self.kit.stop()
omni.kit.commands.execute("RobotEngineBridgeDestroyApplication")
self.kit.shutdown()
def load_stage(self, args):
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return False
self._asset_path = nucleus_server + "/Isaac"
self.usd_path = self._asset_path + args.usd_path
omni.usd.get_context().open_stage(self.usd_path, None)
# Wait two frames so that stage starts loading
self.kit.app.update()
self.kit.app.update()
return True
def configure_bridge(self, json_file: str = "isaacsim.app.json"):
"""
Configure the SDK bridge application that publishes data over tcp
"""
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_id = ext_manager.get_enabled_extension_id("omni.isaac.robot_engine_bridge")
reb_extension_path = ext_manager.get_extension_path(ext_id)
app_file = f"{reb_extension_path}/resources/isaac_engine/json/{json_file}"
carb.log_info(f"create application with: {reb_extension_path} {app_file}")
return omni.kit.commands.execute(
"RobotEngineBridgeCreateApplication", asset_path=reb_extension_path, app_file=app_file
)
def disable_existing_reb_cameras(self):
"""
Disable existing REB_Camera prims for perf testing
"""
import omni.isaac.RobotEngineBridgeSchema as REBSchema
stage = self.kit.get_stage()
for prim in stage.Traverse():
if prim.IsA(REBSchema.RobotEngineCamera):
reb_camera_prim = REBSchema.RobotEngineCamera(prim)
reb_camera_prim.GetEnabledAttr().Set(False)
def create_reb_camera(self, cameraIndex, name, width, height):
"""Create a new REB camera in the stage"""
from pxr import Gf
result, reb_camera_prim = omni.kit.commands.execute(
"RobotEngineBridgeCreateCamera",
path="/World/REB_Camera",
parent=None,
rgb_output_component="output",
rgb_output_channel="encoder_color_{}".format(cameraIndex),
depth_output_component="output",
depth_output_channel="encoder_depth_{}".format(cameraIndex),
segmentation_output_component="output",
segmentation_output_channel="encoder_segmentation_{}".format(cameraIndex),
bbox2d_output_component="output",
bbox2d_output_channel="encoder_bbox_{}".format(cameraIndex),
bbox2d_class_list="",
bbox3d_output_component="output",
bbox3d_output_channel="encoder_bbox3d_{}".format(cameraIndex),
bbox3d_class_list="",
rgb_enabled=True,
depth_enabled=False,
segmentaion_enabled=True,
bbox2d_enabled=False,
bbox3d_enabled=False,
camera_prim_rel=["{}".format(name)],
resolution=Gf.Vec2i(int(width), int(height)),
)
if __name__ == "__main__":
import argparse
import time
parser = argparse.ArgumentParser("Usd Load sample")
parser.add_argument("--usd_path", type=str, help="Path to usd file", required=True)
parser.add_argument("--headless", default=False, action="store_true", help="Run stage headless")
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
parser.add_argument("--benchmark", default=False, action="store_true", help="Run in benchmark mode")
parser.add_argument(
"--benchmark_timeout", type=int, default=60, help="Total walltime in seconds to calculate average FPS for"
)
parser.add_argument(
"--add_rebcamera",
nargs="*",
type=str,
default=[],
help="Total number of REB Camera prims to add, existing ones will be disabled if this option is specified",
)
args, unknown = parser.parse_known_args()
sample = UsdLoadSample(args)
if sample.load_stage(args):
print("Loading stage...")
while sample.kit.is_loading():
sample.kit.update(1.0 / 60.0)
print("Loading Complete")
# Add parameterized rebcamera along with viewport
if args.add_rebcamera is not None and len(args.add_rebcamera) > 0:
# disable existing cameras if we are making new ones
sample.disable_existing_reb_cameras()
reb_count = 0
for name in args.add_rebcamera:
info = name.split(",")
sample.create_reb_camera(reb_count, info[0], info[1], info[2])
reb_count = reb_count + 1
sample.configure_bridge()
sample.start()
if args.test is True:
for i in range(10):
sample.kit.update()
sample.stop()
elif args.benchmark is True:
# Warm up simulation
while sample._viewport.get_viewport_window().get_fps() < 1:
sample.kit.update(1.0 / 60.0)
fps_count = 0
start_time = time.perf_counter()
end_time = start_time + args.benchmark_timeout
count = 0
# Calculate average fps
while sample.kit.app.is_running() and end_time > time.perf_counter():
sample.kit.update(1.0 / 60.0)
fps = sample._viewport.get_viewport_window().get_fps()
fps_count = fps_count + fps
count = count + 1
sample.stop()
print(f"\n----------- Avg. FPS over {args.benchmark_timeout} sec : {fps_count/count}-----------")
else:
while sample.kit.app.is_running():
# Run in realtime mode, we don't specify the step size
sample.kit.update()
sample.stop()
| 7,211 | Python | 39.977273 | 115 | 0.617667 |
KazWong/omniverse_sample/ov_sample/python_samples/isaac_sdk/pose_estimation.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import random
import os
import omni
from omni.isaac.python_app import OmniKitHelper
import carb.tokens
import argparse
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"width": 1280,
"height": 720,
"sync_loads": True,
"headless": True,
"renderer": "RayTracedLighting",
}
# Camera parameters approximating an Intel RealSense D435
FOCAL_LEN = 1.93
HORIZONTAL_APERTURE = 2.682
VERTICAL_APERTURE = 1.509
FOCUS_DIST = 400
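# These values imply a field of view close to the D435 RGB sensor:
#   horizontal FOV = 2 * atan(HORIZONTAL_APERTURE / (2 * FOCAL_LEN)) ~ 69.6 degrees
#   vertical FOV   = 2 * atan(VERTICAL_APERTURE / (2 * FOCAL_LEN))   ~ 42.7 degrees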
RANDOMIZE_SCENE_EVERY_N_STEPS = 10
class DualCameraSample:
def __init__(self):
self.kit = OmniKitHelper(config=CONFIG)
import omni.physx
from pxr import UsdGeom, Usd, Gf
from omni.isaac.synthetic_utils import DomainRandomization
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.robot_engine_bridge import _robot_engine_bridge
self._re_bridge = _robot_engine_bridge.acquire_robot_engine_bridge_interface()
self._viewport = omni.kit.viewport.get_viewport_interface()
self.dr_helper = DomainRandomization()
self.sd_helper = SyntheticDataHelper()
self.frame = 0
self.Gf = Gf
self.UsdGeom = UsdGeom
self.Usd = Usd
def shutdown(self):
self.kit.shutdown()
def start(self):
self.kit.play()
def stop(self):
self.kit.stop()
omni.kit.commands.execute("RobotEngineBridgeDestroyApplication")
def create_stage(self):
# open base stage and set up axis to Z
stage = self.kit.get_stage()
rootLayer = stage.GetRootLayer()
rootLayer.SetPermissionToEdit(True)
with self.Usd.EditContext(stage, rootLayer):
self.UsdGeom.SetStageUpAxis(stage, self.UsdGeom.Tokens.z)
# make two prims, one for env and one for just the room
# this allows us to add other prims to environment for randomization and still hide them all at once
self._environment = stage.DefinePrim("/environment", "Xform")
self._room = stage.DefinePrim("/environment/room", "Xform")
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return False
self._asset_path = nucleus_server + "/Isaac"
stage_path = self._asset_path + "/Environments/Simple_Room/simple_room.usd"
self._room.GetReferences().AddReference(stage_path)
self._target_prim = self.kit.create_prim(
"/objects/cube", "Cube", translation=(0, 0, 100), scale=(10, 10, 50), semantic_label="target"
)
# make sure that we wait for the stage to load
self.kit.app.update()
self.kit.app.update()
return True
def create_camera(self):
self._camera = self.kit.create_prim(
"/World/Camera",
"Camera",
translation=(0.0, 0.0, 0.0),
attributes={
"focusDistance": FOCUS_DIST,
"focalLength": FOCAL_LEN,
"horizontalAperture": HORIZONTAL_APERTURE,
"verticalAperture": VERTICAL_APERTURE,
},
)
# activate new camera
self._viewport.get_viewport_window().set_active_camera(str(self._camera.GetPath()))
# the camera reference frame between sdk and sim seems to be flipped 180 on x
# this prim acts as a proxy to do that coordinate transformation
self._camera_proxy = self.kit.create_prim("/World/Camera/proxy", "Xform", rotation=(180, 0, 0))
def create_bridge_components(self):
result, self.occluded_provider = omni.kit.commands.execute(
"RobotEngineBridgeCreateCamera",
path="/World/REB_Occluded_Provider",
parent=None,
rgb_output_component="output",
rgb_output_channel="encoder_color",
depth_output_component="output",
depth_output_channel="encoder_depth",
segmentation_output_component="output",
segmentation_output_channel="encoder_segmentation",
bbox2d_output_component="output",
bbox2d_output_channel="encoder_bbox",
bbox2d_class_list="",
bbox3d_output_component="output",
bbox3d_output_channel="encoder_bbox3d",
bbox3d_class_list="",
rgb_enabled=True,
depth_enabled=False,
segmentaion_enabled=True,
bbox2d_enabled=False,
bbox3d_enabled=False,
camera_prim_rel=[self._camera.GetPath()],
resolution=self.Gf.Vec2i(1280, 720),
)
result, self.unoccluded_provider = omni.kit.commands.execute(
"RobotEngineBridgeCreateCamera",
path="/World/REB_Unoccluded_Provider",
parent=None,
rgb_output_component="output",
rgb_output_channel="decoder_color",
depth_output_component="output",
depth_output_channel="decoder_depth",
segmentation_output_component="output",
segmentation_output_channel="decoder_segmentation",
bbox2d_output_component="output",
bbox2d_output_channel="decoder_bbox",
bbox2d_class_list="",
bbox3d_output_component="output",
bbox3d_output_channel="decoder_bbox3d",
bbox3d_class_list="",
rgb_enabled=True,
depth_enabled=False,
segmentaion_enabled=True,
bbox2d_enabled=False,
bbox3d_enabled=False,
camera_prim_rel=[self._camera.GetPath()],
resolution=self.Gf.Vec2i(1280, 720),
)
# turn both cameras off so that we don't send an image when time is stepped
self.occluded_provider.GetEnabledAttr().Set(False)
self.unoccluded_provider.GetEnabledAttr().Set(False)
# create rigid body sink to publish ground truth pose information
result, self.rbs_provider = omni.kit.commands.execute(
"RobotEngineBridgeCreateRigidBodySink",
path="/World/REB_RigidBodiesSink",
parent=None,
enabled=False,
output_component="output",
output_channel="bodies",
rigid_body_prims_rel=[self._camera_proxy.GetPath(), self._target_prim.GetPath()],
)
        # disable rigid body sink until the final image is sent out so it's only published once
self.rbs_provider.GetEnabledAttr().Set(False)
def configure_bridge(self, json_file: str = "isaacsim.app.json"):
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_id = ext_manager.get_enabled_extension_id("omni.isaac.robot_engine_bridge")
reb_extension_path = ext_manager.get_extension_path(ext_id)
app_file = f"{reb_extension_path}/resources/isaac_engine/json/{json_file}"
carb.log_info(f"create application with: {reb_extension_path} {app_file}")
return omni.kit.commands.execute(
"RobotEngineBridgeCreateApplication", asset_path=reb_extension_path, app_file=app_file
)
def configure_randomization(self):
texture_list = [
self._asset_path + "/Samples/DR/Materials/Textures/checkered.png",
self._asset_path + "/Samples/DR/Materials/Textures/marble_tile.png",
self._asset_path + "/Samples/DR/Materials/Textures/picture_a.png",
self._asset_path + "/Samples/DR/Materials/Textures/picture_b.png",
self._asset_path + "/Samples/DR/Materials/Textures/textured_wall.png",
self._asset_path + "/Samples/DR/Materials/Textures/checkered_color.png",
]
base_path = str(self._room.GetPath())
self.texture_comp = self.dr_helper.create_texture_comp([base_path], False, texture_list)
# self.color_comp = self.dr_helper.create_color_comp([base_path+"/floor"])
# disable automatic DR, we run it ourselves in the step function
# add a movement and rotation component
# the movement component is offset by 100cm in z so that the object remains above the table
self.movement_comp = self.dr_helper.create_movement_comp(
[str(self._target_prim.GetPath())], min_range=(-10, -10, -10 + 100), max_range=(10, 10, 10 + 100)
)
self.rotation_comp = self.dr_helper.create_rotation_comp([str(self._target_prim.GetPath())])
self.dr_helper.toggle_manual_mode()
def randomize_camera(self):
# randomize camera position
self._viewport.get_viewport_window().set_camera_position(
str(self._camera.GetPath()),
random.randrange(-250, 250),
random.randrange(-250, 250),
random.randrange(10, 250),
True,
)
# get target pose and point camera at it
pose = omni.usd.get_world_transform_matrix(self._target_prim)
# can specify an offset on target position
target = pose.ExtractTranslation() + self.Gf.Vec3d(0, 0, 0)
self._viewport.get_viewport_window().set_camera_target(
str(self._camera.GetPath()), target[0], target[1], target[2], True
)
def randomize_scene(self):
self.dr_helper.randomize_once()
def toggle_environment(self, state):
imageable = self.UsdGeom.Imageable(self._environment)
if state:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def step(self):
# randomize camera every frame
self.randomize_camera()
# randomize textures every 10 frames
if self.frame % RANDOMIZE_SCENE_EVERY_N_STEPS == 0:
self.randomize_scene()
self.toggle_environment(True)
self.kit.update(1.0 / 60.0)
# render occluded view
omni.kit.commands.execute("RobotEngineBridgeTickComponent", path=str(self.occluded_provider.GetPath()))
# hide everything but the object
self.toggle_environment(False)
self.kit.update(0)
# render unoccluded view
omni.kit.commands.execute("RobotEngineBridgeTickComponent", path=str(self.unoccluded_provider.GetPath()))
omni.kit.commands.execute("RobotEngineBridgeTickComponent", path=str(self.rbs_provider.GetPath()))
# output fps every 100 frames
if self.frame % 100 == 0:
print("FPS: ", self._viewport.get_viewport_window().get_fps())
self.frame = self.frame + 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate Occluded and Unoccluded data")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
sample = DualCameraSample()
    # Only proceed if stage creation was successful
if sample.create_stage():
sample.create_camera()
sample.configure_randomization()
# wait for stage to load
while sample.kit.is_loading():
sample.kit.update(0)
sample.create_bridge_components()
sample.configure_bridge()
sample.start()
while sample.kit.app.is_running():
sample.step()
if args.test:
break
sample.stop()
sample.shutdown()
| 11,691 | Python | 39.041096 | 113 | 0.62886 |
KazWong/omniverse_sample/ov_sample/python_samples/core/app_framework.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import sys
import carb
import omni.kit.app
import asyncio
# Simple example showing the minimal setup to run omniverse app from python
class PythonApp:
def __init__(self):
# Load app plugin
self.framework = carb.get_framework()
self.framework.load_plugins(
loaded_file_wildcards=["omni.kit.app.plugin"],
search_paths=[os.path.abspath(f'{os.environ["CARB_APP_PATH"]}/plugins')],
)
self.app = omni.kit.app.get_app()
# Path to where kit was built to
app_root = os.environ["CARB_APP_PATH"]
# Inject experience config:
sys.argv.insert(1, f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit')
# Add paths to extensions
sys.argv.append(f"--ext-folder")
sys.argv.append(f'{os.path.abspath(os.environ["ISAAC_PATH"])}/exts')
# Run headless
sys.argv.append("--no-window")
# Set some settings
sys.argv.append("--/app/asyncRendering=False")
# Start the app
self.app.startup("Isaac-Sim", app_root, sys.argv)
def shutdown(self):
# Shutdown
self.app.shutdown()
self.framework.unload_all_plugins()
print("Shutdown complete")
if __name__ == "__main__":
kit = PythonApp()
# Do something, in this case we wait for stage to open and then exit
stage_task = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
while not stage_task.done():
kit.app.update()
kit.shutdown()
| 1,952 | Python | 30 | 85 | 0.655225 |
KazWong/omniverse_sample/ov_sample/python_samples/core/helper.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.python_app import OmniKitHelper
CONFIG = {
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"renderer": "RayTracedLighting",
"headless": True,
}
if __name__ == "__main__":
# Simple example showing how to start and stop the helper
kit = OmniKitHelper(config=CONFIG)
### Perform any omniverse imports here after the helper loads ###
kit.play() # Start simulation
kit.update(1.0 / 60.0) # Render a single frame
kit.stop() # Stop Simulation
kit.shutdown() # Cleanup application
| 1,000 | Python | 34.749999 | 76 | 0.727 |
KazWong/omniverse_sample/ov_sample/python_samples/dofbot/online_training/dofbot_cube_detection.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Dofbot Cube Detection Demonstration
Use a PyTorch dataloader together with OmniKit to generate scenes and groundtruth to
train a [MobileNetV3](https://arxiv.org/abs/1905.02244) model.
"""
import torch
from torch.utils.data import DataLoader
import torchvision
import matplotlib.pyplot as plt
import numpy as np
import signal
from dofbot_dataset import RandomObjects
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def main(args):
device = "cuda"
# Setup data
train_set = RandomObjects()
train_loader = DataLoader(train_set, batch_size=2, collate_fn=lambda x: tuple(zip(*x)))
def handle_exit(self, *args, **kwargs):
print("exiting cube detection dataset generation...")
train_set.exiting = True
signal.signal(signal.SIGINT, handle_exit)
from omni.isaac.synthetic_utils import visualization as vis
# Setup Model
if args.eval_model == "":
model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(
pretrained=False, num_classes=1 + len(args.categories)
)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
else:
model = torch.load(args.eval_model)
if args.visualize:
plt.ion()
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
for i, train_batch in enumerate(train_loader):
if i > args.max_iters or train_set.exiting:
print("Exiting ...")
train_set.kit.shutdown()
break
if args.eval_model == "":
model.train()
images, targets = train_batch
images = [i.to(device) for i in images]
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
if args.eval_model == "":
loss_dict = model(images, targets)
loss = sum(loss for loss in loss_dict.values())
print(f"ITER {i} | {loss:.6f}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
model.eval()
with torch.no_grad():
predictions = model(images[:1])
if args.visualize:
idx = 0
score_thresh = 0.5
pred = predictions[idx]
np_image = images[idx].permute(1, 2, 0).cpu().numpy()
for ax in axes:
if args.eval_model == "":
fig.suptitle(f"Iteration {i:05} \n {loss:.6f}", fontsize=14)
else:
fig.suptitle(f"Iteration {i:05} \n Evaluating", fontsize=14)
ax.cla()
ax.axis("off")
ax.imshow(np_image)
axes[0].set_title("Input")
axes[1].set_title("Input + Predictions")
score_filter = [i for i in range(len(pred["scores"])) if pred["scores"][i] > score_thresh]
num_instances = len(score_filter)
colours = vis.random_colours(num_instances, enable_random=False)
mapping = {i + 1: cat for i, cat in enumerate(args.categories)}
labels = [mapping[label.item()] for label in pred["labels"]]
vis.plot_boxes(ax, pred["boxes"].tolist(), labels=labels, colours=colours, label_size=10)
if not labels:
axes[1].set_title("None")
plt.draw()
plt.savefig("train.png")
# save every 100 steps
if i % 100 == 0 and args.eval_model == "":
torch.save(model, "cube_model_" + str(i) + ".pth")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Dataset test")
parser.add_argument("-lr", "--learning_rate", type=float, default=1e-4, help="Learning rate")
parser.add_argument("--max-iters", type=float, default=1000, help="Number of training iterations.")
parser.add_argument("--visualize", action="store_true", help="Visualize predicted bounding boxes during training.")
parser.add_argument("--eval_model", help="model file to evaluate", default="", type=str)
args = parser.parse_args()
# Temporary
args.visualize = True
args.categories = ["None", "Cube", "Sphere", "Cone"]
main(args)
| 4,774 | Python | 34.110294 | 119 | 0.591537 |
KazWong/omniverse_sample/ov_sample/python_samples/dofbot/online_training/dofbot_dataset.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Cube Dataset with online randomized scene generation for Bounding Box Detection training.
Use OmniKit to generate a simple scene. At each iteration, the scene is populated by
creating a cube that rests on a plane. The cube pose, colours and textures are randomized.
The camera position is also randomized within a range expected for the Dofbot's POV
before capturing groundtruth consisting of an RGB rendered image, and Tight 2D Bounding Boxes
"""
from math import floor
import os
import torch
import random
import numpy as np
import signal
import omni
import carb
from omni.isaac.python_app import OmniKitHelper
# Setup default generation variables
# Value are (min, max) ranges
OBJ_TRANSLATION_X = (-60.0, 60.0)
OBJ_TRANSLATION_Z = (-60.0, 60.0)
OBJ_ROTATION_Y = (0.0, 360.0)
LIGHT_INTENSITY = (500.0, 50000.0)
# Camera POV generation variables
AZIMUTH_ROTATION = (-30.0, 30.0)
ELEVATION_ROTATION = (-70.0, -20.0)
CAM_TRANSLATION_XYZ = (-50.0, 50.0)
OBJECT_SCALE = (15, 20)
CAMERA_DISTANCE = 800
BBOX_AREA_THRESH = 16
BLANK_SCENES = (5, 8) # between 5-8%
# Default rendering parameters
RENDER_CONFIG = {
"renderer": "PathTracing",
"samples_per_pixel_per_frame": 12,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
"headless": False,
"width": 640,
"height": 480,
}
class RandomObjects(torch.utils.data.IterableDataset):
"""Dataset of cube + distractor objects - domain randomize position/colour/texture/lighting/camera angle
The RGB image and BoundingBox are captured by moving a camera aimed at the centre of the scene
which is positioned at random but at a fixed distance from the centre.
"""
def __init__(
self, categories=["None", "Cube", "Sphere", "Cone"], num_assets_min=1, num_assets_max=3, split=0.7, train=True
):
assert len(categories) > 1
assert (split > 0) and (split <= 1.0)
self.kit = OmniKitHelper(config=RENDER_CONFIG)
self.stage = self.kit.get_stage()
from omni.isaac.synthetic_utils import SyntheticDataHelper
from omni.isaac.synthetic_utils import DomainRandomization
self.sd_helper = SyntheticDataHelper()
self.dr_helper = DomainRandomization()
self.dr_helper.toggle_manual_mode()
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error(
"Could not find nucleus server with /Isaac folder. Please specify the correct nucleus server in apps/omni.isaac.sim.python.kit"
)
return
result, nucleus_server = find_nucleus_server("/Library/Props/Road_Tiles/Parts/")
if result is False:
carb.log_error(
"Could not find nucleus server with /Library/Props/Road_Tiles/Parts/ folder. Please refer to the documentation to aquire the road tile assets"
)
return
self.categories = categories
self.range_num_assets = (num_assets_min, num_assets_max)
self.asset_path = nucleus_server + "/Isaac"
self._setup_world()
self.cur_idx = 0
self.empty_idx = floor(100 / random.uniform(*BLANK_SCENES))
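        # worked example: with BLANK_SCENES = (5, 8) this is floor(100 / uniform(5, 8)),
        # i.e. a value between 12 and 20, so roughly one in every 12-20 scenes is left blank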
self.exiting = False
signal.signal(signal.SIGINT, self._handle_exit)
# turn this on to fix the PathTracing + Play (needed for overlap test) producing line artifacts
carb.settings.get_settings().set_bool("/rtx/resetPtAccumOnAnimTimeChange", True)
def _handle_exit(self, *args, **kwargs):
print("exiting dataset generation...")
self.exiting = True
def _setup_world(self):
from pxr import Sdf, UsdGeom, Gf, UsdPhysics, PhysxSchema
# Create physics scene for collision testing
scene = UsdPhysics.Scene.Define(self.stage, Sdf.Path("/World/physicsScene"))
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
# Set physics scene to use cpu physics
PhysxSchema.PhysxSceneAPI.Apply(self.stage.GetPrimAtPath("/World/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(self.stage, "/World/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
"""Setup lights, walls, floor, ceiling and camera"""
# Setup Room
self.kit.create_prim(
"/World/Room", "Sphere", attributes={"radius": 1e3, "primvars:displayColor": [(1.0, 1.0, 1.0)]}
)
# Setup ground plane
ground_scale = max(OBJECT_SCALE)
ground_prim = self.stage.DefinePrim("/World/Ground", "Cylinder")
UsdGeom.XformCommonAPI(ground_prim).SetScale((ground_scale, ground_scale, ground_scale))
UsdGeom.XformCommonAPI(ground_prim).SetTranslate((0.0, ground_scale * -0.5, 0.0))
UsdGeom.XformCommonAPI(ground_prim).SetRotate((90.0, 0.0, 0.0))
attributes = {"height": 1, "radius": 1e4, "primvars:displayColor": [(1.0, 1.0, 1.0)]}
for k, v in attributes.items():
ground_prim.GetAttribute(k).Set(v)
# Setup lights
self.kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(-450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (0.0, 0.365, 0.848)},
)
self.kit.create_prim(
"/World/Light2",
"SphereLight",
translation=(450, 350, 350),
attributes={"radius": 100, "intensity": 30000.0, "color": (1.0, 0.278, 0.0)},
)
self.kit.create_prim("/World/Asset", "Xform")
# Setup camera
self.camera_rig = UsdGeom.Xformable(self.kit.create_prim("/World/CameraRig", "Xform"))
self.camera = self.kit.create_prim("/World/CameraRig/Camera", "Camera", translation=(0.0, 0.0, CAMERA_DISTANCE))
# Change azimuth angle
self.camera_rig.AddRotateYOp().Set(0)
# Change elevation angle
self.camera_rig.AddRotateXOp().Set(-40)
vpi = omni.kit.viewport.get_viewport_interface()
vpi.get_viewport_window().set_active_camera(str(self.camera.GetPath()))
self.viewport = omni.kit.viewport.get_default_viewport_window()
self.create_dr_comp()
self.kit.update()
def load_single_asset(self, prim_type, scale, i):
from omni.physx.scripts import utils
from pxr import Semantics, UsdGeom
overlapping = True
attempts = 0
max_attempts = 5 # after 5 placement attempts, move on
stage = self.kit.get_stage()
# Randomly generate transforms until a valid position is found
# (i.e. new object will not overlap with existing ones)
# print("attempting to spawn object...", end=" ")
while overlapping and attempts < max_attempts:
x = random.uniform(*OBJ_TRANSLATION_X)
y = scale # assumes bounding box of standard prim is 1 cubic unit
z = random.uniform(*OBJ_TRANSLATION_Z)
rot_y = random.uniform(*OBJ_ROTATION_Y)
# Validate this proposed transform
rot = carb.Float4(0.0, 0.0, 1.0, 0.0)
origin = carb.Float3(float(x), float(y), float(z))
extent = carb.Float3(float(scale), float(scale), float(scale))
overlapping = self.check_overlap(extent, origin, rot)
attempts += 1
if overlapping:
return None
# print("object spawned!")
# No overlap, define the prim and apply the transform
prim = stage.DefinePrim(f"/World/Asset/obj{i}", prim_type)
bound = UsdGeom.Mesh(prim).ComputeWorldBound(0.0, "default")
box_min_y = bound.GetBox().GetMin()[1] * scale
UsdGeom.XformCommonAPI(prim).SetScale((scale, scale, scale))
UsdGeom.XformCommonAPI(prim).SetTranslate((x, -box_min_y, z))
UsdGeom.XformCommonAPI(prim).SetRotate((0, rot_y, 0))
# Add semantic label based on prim type
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(prim_type)
# Add physics to the prim
utils.setCollider(prim, approximationShape="convexHull")
return prim
# OVERLAP --------------------------------------------
def report_hit(self, hit):
""" Existing object turns red if the proposed position would result in a collision
Note: use for troubleshooting, material randomization must be disabled for this to work
"""
from pxr import UsdGeom, Gf, Vt
hitColor = Vt.Vec3fArray([Gf.Vec3f(180.0 / 255.0, 16.0 / 255.0, 0.0)])
usdGeom = UsdGeom.Mesh.Get(self.stage, hit.rigid_body)
usdGeom.GetDisplayColorAttr().Set(hitColor)
return True
def check_overlap(self, extent, origin, rot):
from omni.physx import get_physx_scene_query_interface
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rot, self.report_hit, False)
return numHits > 0
# POPULATE AND RANDOMIZE -------------------------------
def create_dr_comp(self):
"""Creates DR components with various attributes.
        The list of asset prims to randomize is left empty for most components
since we get a new list of assets every iteration.
The asset list will be updated for each component in update_dr_comp()
"""
texture_list = [
self.asset_path + "/Samples/DR/Materials/Textures/checkered.png",
self.asset_path + "/Samples/DR/Materials/Textures/marble_tile.png",
self.asset_path + "/Samples/DR/Materials/Textures/picture_a.png",
self.asset_path + "/Samples/DR/Materials/Textures/picture_b.png",
self.asset_path + "/Samples/DR/Materials/Textures/textured_wall.png",
self.asset_path + "/Samples/DR/Materials/Textures/checkered_color.png",
]
material_list = [
self.asset_path + "/Samples/DR/Materials/checkered.mdl",
self.asset_path + "/Samples/DR/Materials/checkered_color.mdl",
self.asset_path + "/Samples/DR/Materials/marble_tile.mdl",
self.asset_path + "/Samples/DR/Materials/picture_a.mdl",
self.asset_path + "/Samples/DR/Materials/picture_b.mdl",
self.asset_path + "/Samples/DR/Materials/textured_wall.mdl",
]
light_list = ["World/Light1", "World/Light2"]
self.texture_comp = self.dr_helper.create_texture_comp([], True, texture_list)
self.color_comp = self.dr_helper.create_color_comp([])
self.material_comp = self.dr_helper.create_material_comp([], material_list)
self.movement_comp = self.dr_helper.create_movement_comp([])
self.rotation_comp = self.dr_helper.create_rotation_comp([])
self.scale_comp = self.dr_helper.create_scale_comp([], max_range=(50, 50, 50))
self.light_comp = self.dr_helper.create_light_comp(light_list)
self.visibility_comp = self.dr_helper.create_visibility_comp([])
def update_dr_comp(self, dr_comp):
"""Updates DR component with the asset prim paths that will be randomized"""
comp_prim_paths_target = dr_comp.GetPrimPathsRel()
comp_prim_paths_target.ClearTargets(True)
# Add targets for all objects in scene (cube + distractors)
for asset in self.assets:
comp_prim_paths_target.AddTarget(asset.GetPrimPath())
# Can also add target for ground plane
# comp_prim_paths_target.AddTarget("/World/Ground")
def populate_scene(self):
        """Clear the scene and populate it with assets."""
        from omni.physx.scripts import utils
self.stage.RemovePrim("/World/Asset")
self.assets = []
# Start simulation so we can check overlaps before spawning
self.kit.play()
# After every (n = self.empty_idx) scenes, generate a blank scene
if (self.cur_idx % self.empty_idx) != 0:
# Add random number of objects
num_objects = random.randint(*self.range_num_assets)
for i in range(num_objects):
prim_type = random.choice(self.categories)
prim_scale = random.uniform(*OBJECT_SCALE)
new_asset = self.load_single_asset(prim_type, prim_scale, i)
# Make sure valid object was returned before appending
if new_asset:
self.assets.append(new_asset)
self.kit.update()
else:
print("Blank scene -------------------------------------------------------------")
self.stage.RemovePrim("/World/Asset")
self.assets = []
# Pick a new value for (n = self.empty_idx)
self.empty_idx = floor(100 / random.uniform(*BLANK_SCENES))
def randomize_camera(self):
"""Randomize the camera position."""
# Clear previous transforms
self.camera_rig.ClearXformOpOrder()
# Change azimuth angle
self.camera_rig.AddRotateYOp().Set(random.uniform(*AZIMUTH_ROTATION))
# Change elevation angle
self.camera_rig.AddRotateXOp().Set(random.uniform(*ELEVATION_ROTATION))
# Move camera position (translate)
translation_xyz = tuple(random.uniform(*CAM_TRANSLATION_XYZ) for _ in range(3))
self.camera_rig.AddTranslateOp().Set(translation_xyz)
def randomize_lighting(self):
self.stage.RemovePrim("/World/Light1")
intens = random.uniform(*LIGHT_INTENSITY)
self.kit.create_prim(
"/World/Light1",
"SphereLight",
translation=(-450, 350, 350),
attributes={"radius": 100, "intensity": intens, "color": (0.0, 0.365, 0.848)},
)
self.kit.update()
# ITERATION----------------------------------------------
def __iter__(self):
return self
def __next__(self):
print("next!------------------------------")
# Generate a new scene
self.populate_scene()
self.randomize_camera()
self.update_dr_comp(self.texture_comp)
self.dr_helper.randomize_once()
self.randomize_lighting()
# Step once and then wait for materials to load
self.kit.update()
print("waiting for materials to load...")
while self.kit.is_loading():
self.kit.update()
print("done")
self.kit.update()
# Collect Groundtruth
gt = self.sd_helper.get_groundtruth(["rgb", "boundingBox2DTight"], self.viewport)
# RGB
# Drop alpha channel
image = gt["rgb"][..., :3]
# Cast to tensor if numpy array
if isinstance(gt["rgb"], np.ndarray):
image = torch.tensor(image, dtype=torch.float, device="cuda")
# Normalize between 0. and 1. and change order to channel-first.
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# Bounding Box
gt_bbox = gt["boundingBox2DTight"]
# Create mapping from categories to index
self.categories = ["None", "Cube", "Sphere", "Cone"]
mapping = {cat: i + 1 for i, cat in enumerate(self.categories)}
bboxes = torch.tensor(gt_bbox[["x_min", "y_min", "x_max", "y_max"]].tolist())
labels = torch.LongTensor([mapping[bb["semanticLabel"]] for bb in gt_bbox])
# If no objects present in view
if bboxes.nelement() == 0:
print("No object present in view")
target = {
"boxes": torch.zeros((0, 4), dtype=torch.float32),
"labels": torch.tensor([1], dtype=torch.int64),
"image_id": torch.LongTensor([self.cur_idx]),
"area": torch.tensor(0, dtype=torch.float32),
"iscrowd": torch.zeros((0,), dtype=torch.int64),
}
else:
            # Calculate bounding box area for each box
areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
# Identify invalid bounding boxes to filter final output
valid_areas = (areas > 0.0) * (areas < (image.shape[1] * image.shape[2]))
target = {
"boxes": bboxes[valid_areas],
"labels": labels[valid_areas],
"image_id": torch.LongTensor([self.cur_idx]),
"area": areas[valid_areas],
"iscrowd": torch.BoolTensor([False] * len(bboxes[valid_areas])), # Assume no crowds
}
self.cur_idx += 1
return image, target
if __name__ == "__main__":
"Typical usage"
import argparse
import matplotlib.pyplot as plt
dataset = RandomObjects()
from omni.isaac.synthetic_utils import visualization as vis
# Iterate through dataset and visualize the output
plt.ion()
_, axes = plt.subplots(1, 2, figsize=(10, 5))
plt.tight_layout()
count = 0
for image, target in dataset:
for ax in axes:
ax.clear()
ax.axis("off")
np_image = image.permute(1, 2, 0).cpu().numpy()
axes[0].imshow(np_image)
num_instances = len(target["boxes"])
colours = vis.random_colours(num_instances, enable_random=False)
        categories = ["None", "Cube", "Sphere", "Cone"]
mapping = {i + 1: cat for i, cat in enumerate(categories)}
labels = [mapping[label.item()] for label in target["labels"]]
vis.plot_boxes(ax, target["boxes"].tolist(), labels=labels, colours=colours)
plt.draw()
plt.savefig("dataset.png")
if dataset.exiting:
break
# cleanup
dataset.kit.stop()
dataset.kit.shutdown()
| 18,433 | Python | 40.147321 | 158 | 0.616015 |
KazWong/omniverse_sample/ov_sample/ros_samples/joint_control/ros_publisher.py | #!/usr/bin/env python
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rospy
from sensor_msgs.msg import JointState
import numpy as np
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", required=True, help="position: for joint position control. velocity for joint velocity control"
)
args = parser.parse_args()
rospy.init_node("test_rosbridge", anonymous=True)
pub = rospy.Publisher("/joint_command", JointState, queue_size=10)
joint_state = JointState()
joint_state.name = [
"panda_joint1",
"panda_joint2",
"panda_joint3",
"panda_joint4",
"panda_joint5",
"panda_joint6",
"panda_joint7",
"panda_finger_joint1",
"panda_finger_joint2",
]
num_joints = len(joint_state.name)
# make sure kit's editor is playing for receiving messages
if args.mode == "position":
joint_state.position = np.array([0.0] * num_joints)
default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4]
    # limiting the movements to a smaller range (this is not the range of the robot, just the range of the movement)
max_joints = np.array(default_joints) + 0.5
min_joints = np.array(default_joints) - 0.5
# position control the robot to wiggle around each joint
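    # sin() lies in [-1, 1]; scaling by half the joint range and centring on default_joints
    # keeps every commanded position within [min_joints, max_joints]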
time_start = time.time()
rate = rospy.Rate(20)
while not rospy.is_shutdown():
joint_state.position = np.sin(time.time() - time_start) * (max_joints - min_joints) * 0.5 + default_joints
pub.publish(joint_state)
rate.sleep()
elif args.mode == "velocity":
rate = rospy.Rate(0.5)
joint_state.position = []
joint_state.velocity = np.array([-0.7] * num_joints)
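    # at 0.5 Hz the sign flip below happens every 2 s, so each joint sweeps back and
    # forth at a constant 0.7 rad/s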
while not rospy.is_shutdown():
pub.publish(joint_state)
rate.sleep()
joint_state.velocity = -joint_state.velocity
else:
print("control mode error")
| 2,235 | Python | 28.421052 | 115 | 0.689485 |
KazWong/omniverse_sample/ov_sample/ros_samples/isaac_moveit/scripts/panda_finger_joint2_publisher.py | #!/usr/bin/env python
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rospy
from sensor_msgs.msg import JointState
finger_joint2 = JointState()
finger_joint2.name = ["panda_finger_joint2"]
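# This node mirrors every panda_finger_joint1 command onto panda_finger_joint2 so the
# two gripper fingers stay symmetric.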
def joint_states_callback(message):
for i, name in enumerate(message.name):
if name == "panda_finger_joint1":
pos = message.position[i]
finger_joint2.position = [pos]
pub.publish(finger_joint2)
return
if __name__ == "__main__":
rospy.init_node("panda_finger_joint2_publisher")
pub = rospy.Publisher("/joint_command", JointState, queue_size=1)
rospy.Subscriber("/joint_command", JointState, joint_states_callback, queue_size=1)
rospy.spin()
| 1,116 | Python | 32.848484 | 87 | 0.72043 |
KazWong/omniverse_sample/ov_sample/ros_samples/navigation/carter_2dnav/params/base_local_planner_params.yaml | TrajectoryPlannerROS:
holonomic_robot: false
max_vel_x: 0.5
min_vel_x: 0.1
max_vel_theta: 0.8
min_vel_theta: -0.8
min_in_place_vel_theta: 0.4
acc_lim_theta: 0.2
acc_lim_x: 0.5
acc_lim_y: 0.0
xy_goal_tolerance: 0.15
yaw_goal_tolerance: 0.05
occdist_scale: 1.0
escape_vel: -0.1
| 303 | YAML | 19.266665 | 29 | 0.646865 |
KazWong/omniverse_sample/ov_sample/ros_samples/navigation/carter_2dnav/params/costmap_common_params.yaml | obstacle_range: 100
raytrace_range: 3
robot_radius: 0.5
inflation_radius: 0.15
observation_sources: laser_scan_sensor
laser_scan_sensor: {sensor_frame: carter_lidar, data_type: LaserScan, topic: /scan, marking: true, clearing: true}
| 233 | YAML | 32.428567 | 114 | 0.772532 |
KazWong/omniverse_sample/ov_sample/ros_samples/teleport/ros_pose_client.py | #!/usr/bin/env python
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from __future__ import print_function
from isaac_ros_messages.srv import IsaacPose, IsaacPoseRequest
from geometry_msgs.msg import Pose, Twist, Vector3
import rospy
import numpy as np
def send_pose_cube_client(new_pose):
rospy.wait_for_service("/teleport_pos")
try:
send_pose = rospy.ServiceProxy("/teleport_pos", IsaacPose)
send_pose(new_pose)
except rospy.ServiceException as e:
print("Service call failed: %s" % e)
def compose_pose(pos_vec, quat_vec):
obj_pose = Pose()
obj_pose.position.x = pos_vec[0]
obj_pose.position.y = pos_vec[1]
obj_pose.position.z = pos_vec[2]
obj_pose.orientation.w = quat_vec[0]
obj_pose.orientation.x = quat_vec[1]
obj_pose.orientation.y = quat_vec[2]
obj_pose.orientation.z = quat_vec[3]
return obj_pose
def compose_twist(lx, ly, lz, ax, ay, az):
obj_twist = Twist()
obj_twist.linear.x = lx
obj_twist.linear.y = ly
obj_twist.linear.z = lz
obj_twist.angular.x = ax
obj_twist.angular.y = ay
obj_twist.angular.z = az
return obj_twist
def compose_vec3(x, y, z):
obj_scale = Vector3()
obj_scale.x = x
obj_scale.y = y
obj_scale.z = z
return obj_scale
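# Example (illustrative only, not exercised by the loop below): the unused helpers build
# the other message types the same way, e.g.
#   twist = compose_twist(0.1, 0.0, 0.0, 0.0, 0.0, 0.0)  # 0.1 m/s along x
#   scale = compose_vec3(1.0, 1.0, 1.0)                   # unit scale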
if __name__ == "__main__":
rospy.init_node("test_ros_teleport", anonymous=True)
new_isaac_pose_cube = IsaacPoseRequest()
new_isaac_pose_cube.names = ["/Cube"]
cube_pos_vec = np.array([0.0, 0.0, 0.0])
quat_vec = np.array([1, 0.0, 0.0, 0.0])
rate = rospy.Rate(1) # hz
while not rospy.is_shutdown():
# new random pose
cube_pos_vec = np.random.rand(3) * 0.1
cube_pose = compose_pose(cube_pos_vec, quat_vec)
new_isaac_pose_cube.poses = [cube_pose]
# publish
send_pose_cube_client(new_isaac_pose_cube)
rate.sleep()
| 2,264 | Python | 28.415584 | 76 | 0.661219 |