file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---|
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/camera_opencv_fisheye.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True}) # Option: "renderer": "PathTracing"
import numpy as np
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.sensor import Camera
from PIL import Image, ImageDraw
# Given the OpenCV camera matrix and distortion coefficients (Fisheye, Kannala-Brandt model),
# creates a camera and a sample scene, renders an image and saves it to
# camera_opencv_fisheye.png file. The asset is also saved to camera_opencv_fisheye.usd file.
# Currently only square images are supported (there is an issue in the rendering pipeline).
# To produce non-square images, crop the unused region of the rendered image.
width, height = 1920, 1200
camera_matrix = [[455.8, 0.0, 943.8], [0.0, 454.7, 602.3], [0.0, 0.0, 1.0]]
distortion_coefficients = [0.05, 0.01, -0.003, -0.0005]
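# For reference, the Kannala-Brandt model used by cv2.fisheye: a point at angle theta
# from the optical axis is distorted to theta_d = theta * (1 + k1*theta^2 + k2*theta^4
# + k3*theta^6 + k4*theta^8), and the four coefficients above are [k1, k2, k3, k4].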
# Camera sensor size and optical path parameters. These parameters are not part of the
# OpenCV camera model, but they are necessary to simulate the depth of field effect.
#
# To disable the depth of field effect, set the f_stop to 0.0. This is useful for debugging.
pixel_size = 3 # in microns, 3 microns is common
f_stop = 1.8 # f-number, the ratio of the lens focal length to the diameter of the entrance pupil
focus_distance = 0.6 # in meters, the distance from the camera to the object plane
diagonal_fov = 235 # in degrees, the diagonal field of view to be rendered
# Create a world, add a 1x1x1 meter cube, a ground plane, and a camera
world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
cube_1 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_1",
name="cube_1",
position=np.array([0, 0, 0.5]),
scale=np.array([1.0, 1.0, 1.0]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
cube_2 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_2",
name="cube_2",
position=np.array([2, 0, 0.5]),
scale=np.array([1.0, 1.0, 1.0]),
size=1.0,
color=np.array([0, 255, 0]),
)
)
cube_3 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_3",
name="cube_3",
position=np.array([0, 4, 1]),
scale=np.array([2.0, 2.0, 2.0]),
size=1.0,
color=np.array([0, 0, 255]),
)
)
camera = Camera(
prim_path="/World/camera",
position=np.array([0.0, 0.0, 2.0]), # 1 meter away from the side of the cube
frequency=30,
resolution=(width, height),
orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),
)
# Setup the scene and render a frame
world.reset()
camera.initialize()
# Calculate the focal length and aperture size from the camera matrix
((fx, _, cx), (_, fy, cy), (_, _, _)) = camera_matrix
horizontal_aperture = pixel_size * 1e-3 * width
vertical_aperture = pixel_size * 1e-3 * height
focal_length_x = fx * pixel_size * 1e-3
focal_length_y = fy * pixel_size * 1e-3
focal_length = (focal_length_x + focal_length_y) / 2 # in mm
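# Sanity check (worked example): fx = 455.8 px * 3 um/px * 1e-3 = 1.3674 mm, so the
# mean focal length is ~1.37 mm, a very short lens as expected for a 235-degree fisheye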
# Set the camera parameters, note the unit conversion between Isaac Sim sensor and Kit
camera.set_focal_length(focal_length / 10.0)
camera.set_focus_distance(focus_distance)
camera.set_lens_aperture(f_stop * 100.0)
camera.set_horizontal_aperture(horizontal_aperture / 10.0)
camera.set_vertical_aperture(vertical_aperture / 10.0)
camera.set_clipping_range(0.05, 1.0e5)
# Set the distortion coefficients
camera.set_projection_type("fisheyePolynomial")
camera.set_kannala_brandt_properties(width, height, cx, cy, diagonal_fov, distortion_coefficients)
# Get the rendered frame and save it to a file
for i in range(100):
world.step(render=True)
camera.get_current_frame()
img = Image.fromarray(camera.get_rgba()[:, :, :3])
# Optional step, draw the 3D points to the image plane using the OpenCV fisheye model
def draw_points_opencv_fisheye(points3d):
import cv2
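# Assumes opencv-python is installed (to install: ./python.sh -m pip install opencv-python)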
rvecs, tvecs = np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0])
points, jac = cv2.fisheye.projectPoints(
np.expand_dims(points3d, 1), rvecs, tvecs, np.array(camera_matrix), np.array(distortion_coefficients)
)
draw = ImageDraw.Draw(img)
for pt in points:
x, y = pt[0]
print("Drawing point at: ", x, y)
draw.ellipse((x - 4, y - 4, x + 4, y + 4), fill="yellow", outline="yellow")
# Draw a few 3D points at the image plane (camera is pointing down to the ground plane).
# OpenCV doesn't support projecting points behind the camera, so we avoid that.
draw_points_opencv_fisheye(
points3d=np.array(
[
[0.5, 0.5, 1.0],
[-0.5, 0.5, 1.0],
[0.5, -0.5, 1.0],
[-0.5, -0.5, 1.0],
[-3.0, -1.0, 0.0],
[-3.0, 1.0, 0.0],
[-0.5, -1.5, 1.0],
[0.5, -1.5, 1.0],
]
)
)
print("Saving the rendered image to: camera_opencv_fisheye.png")
img.save("camera_opencv_fisheye.png")
print("Saving the asset to camera_opencv_fisheye.usd")
world.scene.stage.Export("camera_opencv_fisheye.usd")
simulation_app.close()
| 5,598 | Python | 34.891025 | 109 | 0.670954 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/rotating_lidar_physX.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import sys
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.sensor import RotatingLidarPhysX
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.robots import WheeledRobot
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
asset_path = assets_root_path + "/Isaac/Robots/Carter/carter_v1.usd"
my_carter = my_world.scene.add(
WheeledRobot(
prim_path="/World/Carter",
name="my_carter",
wheel_dof_names=["left_wheel", "right_wheel"],
create_robot=True,
usd_path=asset_path,
position=np.array([0, 0.0, 0.5]),
)
)
my_lidar = my_world.scene.add(
RotatingLidarPhysX(
prim_path="/World/Carter/chassis_link/lidar", name="lidar", translation=np.array([-0.06, 0, 0.38])
)
)
cube_1 = my_world.scene.add(
DynamicCuboid(prim_path="/World/cube", name="cube_1", position=np.array([2, 2, 2.5]), scale=np.array([20, 0.2, 5]))
)
cube_2 = my_world.scene.add(
DynamicCuboid(
prim_path="/World/cube_2", name="cube_2", position=np.array([2, -2, 2.5]), scale=np.array([20, 0.2, 5])
)
)
my_controller = DifferentialController(name="simple_control", wheel_radius=0.24, wheel_base=0.56)
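# Note: forward() takes command=[linear velocity (m/s), angular velocity (rad/s)] and
# maps it to left/right wheel joint velocities using wheel_radius and wheel_base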
my_world.reset()
my_lidar.add_depth_data_to_frame()
my_lidar.add_point_cloud_data_to_frame()
my_lidar.enable_visualization()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
# print(imu_sensor.get_current_frame())
if i >= 0 and i < 1000:
# print(my_lidar.get_current_frame())
# forward
my_carter.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
elif i >= 1000 and i < 1265:
# rotate
my_carter.apply_wheel_actions(my_controller.forward(command=[0.0, np.pi / 12]))
elif i >= 1265 and i < 2000:
# forward
my_carter.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
elif i == 2000:
i = 0
i += 1
if args.test is True:
break
simulation_app.close()
| 3,291 | Python | 32.938144 | 119 | 0.676998 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/camera_opencv.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
import numpy as np
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.sensor import Camera
from PIL import Image, ImageDraw
# Given the OpenCV camera matrix and distortion coefficients (Rational Polynomial model),
# creates a camera and a sample scene, renders an image and saves it to the
# camera_opencv.png file. The asset is also saved to the camera_opencv.usd file.
width, height = 1920, 1200
camera_matrix = [[958.8, 0.0, 957.8], [0.0, 956.7, 589.5], [0.0, 0.0, 1.0]]
distortion_coefficients = [0.14, -0.03, -0.0002, -0.00003, 0.009, 0.5, -0.07, 0.017]
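# OpenCV rational polynomial coefficient ordering: [k1, k2, p1, p2, k3, k4, k5, k6]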
# Camera sensor size and optical path parameters. These parameters are not part of the
# OpenCV camera model, but they are necessary to simulate the depth of field effect.
#
# To disable the depth of field effect, set the f_stop to 0.0. This is useful for debugging.
pixel_size = 3 # in microns, 3 microns is common
f_stop = 1.8 # f-number, the ratio of the lens focal length to the diameter of the entrance pupil
focus_distance = 0.6 # in meters, the distance from the camera to the object plane
diagonal_fov = 140 # in degrees, the diagonal field of view to be rendered
# Create a world, add a 1x1x1 meter cube, a ground plane, and a camera
world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
cube_1 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_1",
name="cube_1",
position=np.array([0, 0, 0.5]),
scale=np.array([1.0, 1.0, 1.0]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
camera = Camera(
prim_path="/World/camera",
position=np.array([0.0, 0.0, 2.0]), # 1 meter away from the side of the cube
frequency=30,
resolution=(width, height),
orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),
)
# Setup the scene and render a frame
world.reset()
camera.initialize()
# Calculate the focal length and aperture size from the camera matrix
((fx, _, cx), (_, fy, cy), (_, _, _)) = camera_matrix
horizontal_aperture = pixel_size * 1e-3 * width
vertical_aperture = pixel_size * 1e-3 * height
focal_length_x = fx * pixel_size * 1e-3
focal_length_y = fy * pixel_size * 1e-3
focal_length = (focal_length_x + focal_length_y) / 2 # in mm
# Set the camera parameters, note the unit conversion between Isaac Sim sensor and Kit
camera.set_focal_length(focal_length / 10.0)
camera.set_focus_distance(focus_distance)
camera.set_lens_aperture(f_stop * 100.0)
camera.set_horizontal_aperture(horizontal_aperture / 10.0)
camera.set_vertical_aperture(vertical_aperture / 10.0)
camera.set_clipping_range(0.05, 1.0e5)
# Set the distortion coefficients
camera.set_projection_type("fisheyePolynomial")
camera.set_rational_polynomial_properties(width, height, cx, cy, diagonal_fov, distortion_coefficients)
# Get the rendered frame and save it to a file
for i in range(100):
world.step(render=True)
camera.get_current_frame()
img = Image.fromarray(camera.get_rgba()[:, :, :3])
# Optional step, draw the 3D points to the image plane using the OpenCV rational polynomial model
def draw_points_opencv(points3d):
import cv2
rvecs, tvecs = np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0])
points, jac = cv2.projectPoints(
np.expand_dims(points3d, 1), rvecs, tvecs, np.array(camera_matrix), np.array(distortion_coefficients)
)
draw = ImageDraw.Draw(img)
for pt in points:
x, y = pt[0]
print("Drawing point at: ", x, y)
draw.ellipse((x - 4, y - 4, x + 4, y + 4), fill="orange", outline="orange")
# Draw the 3D points to the image plane
draw_points_opencv(points3d=np.array([[0.5, 0.5, 1.0], [-0.5, 0.5, 1.0], [0.5, -0.5, 1.0], [-0.5, -0.5, 1.0]]))
print("Saving the rendered image to: camera_opencv.png")
img.save("camera_opencv.png")
print("Saving the asset to camera_opencv.usd")
world.scene.stage.Export("camera_opencv.usd")
simulation_app.close()
| 4,537 | Python | 37.786324 | 111 | 0.706634 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.sensor/camera_ros.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Given a printout of ROS topic, containing the intrinsic and extrinsic parameters of the camera,
# creates a camera and a sample scene, renders an image and saves it to camera_ros.png file.
# The asset is also saved to camera_ros.usd file. The camera model is based on Intel RealSense D435i.
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
import math
import numpy as np
import omni.isaac.core.utils.numpy.rotations as rot_utils
import yaml
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.sensor import Camera
from PIL import Image, ImageDraw
# To create a model of a given ROS camera, print the camera_info topic with:
# rostopic echo /camera/color/camera_info
# And copy the output into the yaml_data variable below. Populate additional parameters using the sensor manual.
#
# Note: only the rational_polynomial model is supported in this example. For plumb_bob or pinhole
# models, set the distortion_model to "rational_polynomial" and pad array D with 0.0 up to 8 elements.
# The camera_info topic in the Isaac Sim ROS bridge will be in the rational_polynomial format.
#
# Note: when fx is not equal to fy (pixels are not square), the average of fx and fy is used as the focal length,
# and the intrinsic matrix is adjusted to have square pixels. This updated matrix is used for rendering, and
# it is also populated into the camera_info topic in the Isaac Sim ROS bridge.
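# For example, with the K below (fx = 612.418, fy = 612.362), rendering uses
# f = (fx + fy) / 2 ~= 612.39 px, i.e. K effectively becomes [[f, 0, cx], [0, f, cy], [0, 0, 1]].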
yaml_data = """
# rostopic echo /camera/color/camera_info
header:
seq: 211
stamp:
secs: 1694379352
nsecs: 176209771
frame_id: "camera_color_optical_frame"
height: 480
width: 640
distortion_model: "rational_polynomial"
D: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
K: [612.4178466796875, 0.0, 309.72296142578125, 0.0, 612.362060546875, 245.35870361328125, 0.0, 0.0, 1.0]
R: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
P: [612.4178466796875, 0.0, 309.72296142578125, 0.0, 0.0, 612.362060546875, 245.35870361328125, 0.0, 0.0, 0.0, 1.0, 0.0]
"""
# Camera sensor size and optical path parameters. These parameters are not part of the
# OpenCV camera model, but they are necessary to simulate the depth of field effect.
#
# To disable the depth of field effect, set the f_stop to 0.0. This is useful for debugging.
pixel_size = 1.4 # Pixel size in microns, 3 microns is common
f_stop = 2.0 # F-number, the ratio of the lens focal length to the diameter of the entrance pupil
focus_distance = 0.5 # Focus distance in meters, the distance from the camera to the object plane
# Parsing the YAML data
data = yaml.safe_load(yaml_data)
print("Header Frame ID:", data["header"]["frame_id"])
width, height, K, D = data["width"], data["height"], data["K"], data["D"]
# Create a world, add a 1x1x1 meter cube, a ground plane, and a camera
world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
world.reset()
cube_1 = world.scene.add(
DynamicCuboid(
prim_path="/new_cube_1",
name="cube_1",
position=np.array([0, 0, 0.5]),
scale=np.array([1.0, 1.0, 1.0]),
size=1.0,
color=np.array([255, 0, 0]),
)
)
camera = Camera(
prim_path="/World/camera",
position=np.array([0.0, 0.0, 3.0]), # 2 meters away from the side of the cube
frequency=30,
resolution=(width, height),
orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),
)
camera.initialize()
# Calculate the focal length and aperture size from the camera matrix
(fx, _, cx, _, fy, cy, _, _, _) = K
horizontal_aperture = pixel_size * 1e-3 * width
vertical_aperture = pixel_size * 1e-3 * height
focal_length_x = fx * pixel_size * 1e-3
focal_length_y = fy * pixel_size * 1e-3
focal_length = (focal_length_x + focal_length_y) / 2 # in mm
# Set the camera parameters, note the unit conversion between Isaac Sim sensor and Kit
camera.set_focal_length(focal_length / 10.0)
camera.set_focus_distance(focus_distance)
camera.set_lens_aperture(f_stop * 100.0)
camera.set_horizontal_aperture(horizontal_aperture / 10.0)
camera.set_vertical_aperture(vertical_aperture / 10.0)
camera.set_clipping_range(0.05, 1.0e5)
# Set the distortion coefficients; this is necessary when cx, cy are not at the center of the image
diagonal = 2 * math.sqrt(max(cx, width - cx) ** 2 + max(cy, height - cy) ** 2)
diagonal_fov = 2 * math.atan2(diagonal, fx + fy) * 180 / math.pi
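# Note: atan2(diagonal, fx + fy) == atan((diagonal / 2) / ((fx + fy) / 2)), i.e. the
# half-diagonal in pixels over the mean focal length in pixels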
camera.set_projection_type("fisheyePolynomial")
camera.set_rational_polynomial_properties(width, height, cx, cy, diagonal_fov, D)
# Get the rendered frame and save it to a file
for i in range(100):
world.step(render=True)
camera.get_current_frame()
img = Image.fromarray(camera.get_rgba()[:, :, :3])
# Optional step, draw the 3D points to the image plane using the OpenCV rational polynomial model
def draw_points_opencv(points3d):
try:
# To install, run python.sh -m pip install opencv-python
import cv2
rvecs, tvecs = np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0])
points, jac = cv2.projectPoints(
np.expand_dims(points3d, 1), rvecs, tvecs, np.array(K).reshape(3, 3), np.array(D)
)
draw = ImageDraw.Draw(img)
for pt in points:
x, y = pt[0]
print("Drawing point at: ", x, y)
draw.ellipse((x - 4, y - 4, x + 4, y + 4), fill="orange", outline="orange")
except ImportError:
print("OpenCV is not installed, skipping OpenCV overlay")
print("To install OpenCV, run: python.sh -m pip install opencv-python")
# Draw the 3D points to the image plane
draw_points_opencv(points3d=np.array([[0.5, 0.5, 4.0], [-0.5, 0.5, 4.0], [0.5, -0.5, 4.0], [-0.5, -0.5, 4.0]]))
print("Saving the rendered image to: camera_ros.png")
img.save("camera_ros.png")
print("Saving the asset to camera_ros.usd")
world.scene.stage.Export("camera_ros.usd")
simulation_app.close()
| 6,319 | Python | 39.774193 | 120 | 0.700902 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/carter_multiple_robot_navigation.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
from omni.isaac.kit import SimulationApp
HOSPITAL_USD_PATH = "/Isaac/Samples/ROS2/Scenario/multiple_robot_carter_hospital_navigation.usd"
OFFICE_USD_PATH = "/Isaac/Samples/ROS2/Scenario/multiple_robot_carter_office_navigation.usd"
# Default environment: Hospital
ENV_USD_PATH = HOSPITAL_USD_PATH
if len(sys.argv) > 1:
if sys.argv[1] == "office":
# Choosing Office environment
ENV_USD_PATH = OFFICE_USD_PATH
elif sys.argv[1] != "hospital":
carb.log_warn("Environment name is invalid. Choosing default Hospital environment.")
else:
carb.log_warn("Environment name not specified. Choosing default Hospital environment.")
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS2 bridge sample demonstrating the manual loading of Multiple Robot Navigation scenario
simulation_app = SimulationApp(CONFIG)
import omni
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, prims, rotations, stage, viewports
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Sdf
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
# Locate assets root folder to load sample
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
usd_path = assets_root_path + ENV_USD_PATH
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
simulation_app.update()
simulation_app.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
simulation_app.update()
print("Loading Complete")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
simulation_app.update()
simulation_context.play()
simulation_app.update()
while simulation_app.is_running():
# runs with a realtime clock
simulation_app.update()
simulation_context.stop()
simulation_app.close()
| 2,512 | Python | 29.646341 | 99 | 0.766322 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/rtx_lidar.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
from omni.isaac.kit import SimulationApp
# Example for creating a RTX lidar sensor and publishing PointCloud2 data
simulation_app = SimulationApp({"headless": False})
import omni
import omni.kit.viewport.utility
import omni.replicator.core as rep
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import nucleus, stage
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Gf
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
simulation_app.update()
# Loading the full_warehouse environment
stage.add_reference_to_stage(
assets_root_path + "/Isaac/Environments/Simple_Warehouse/full_warehouse.usd", "/background"
)
simulation_app.update()
# Create the lidar sensor that generates data into "RtxSensorCpu"
# Sensor needs to be rotated 90 degrees about X so that its Z axis points up.
# Possible options are Example_Rotary and Example_Solid_State.
# DriveSim applies the quaternion (0.5, -0.5, -0.5, w=-0.5); we have to apply the reverse
_, sensor = omni.kit.commands.execute(
"IsaacSensorCreateRtxLidar",
path="/sensor",
parent=None,
config="Example_Rotary",
translation=(0, 0, 1.0),
orientation=Gf.Quatd(1.0, 0.0, 0.0, 0.0), # Gf.Quatd is w,i,j,k
)
# RTX sensors are cameras and must be assigned to their own render product
hydra_texture = rep.create.render_product(sensor.GetPath(), [1, 1], name="Isaac")
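# Note: the 1x1 resolution here is nominal; the published point cloud is generated by
# the lidar sensor config, not by this render product's image size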
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
simulation_app.update()
# Create Point cloud publisher pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "ROS2PublishPointCloud")
writer.initialize(topicName="point_cloud", frameId="sim_lidar")
writer.attach([hydra_texture])
# Create the debug draw pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "DebugDrawPointCloud")
writer.attach([hydra_texture])
# Create LaserScan publisher pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "ROS2PublishLaserScan")
writer.initialize(topicName="laser_scan", frameId="sim_lidar")
writer.attach([hydra_texture])
simulation_app.update()
simulation_context.play()
while simulation_app.is_running():
simulation_app.update()
# cleanup and shutdown
simulation_context.stop()
simulation_app.close()
| 3,055 | Python | 32.582417 | 113 | 0.765303 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/clock.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import time
import carb
from omni.isaac.kit import SimulationApp
# Example ROS2 bridge sample showing rclpy and rosclock interaction
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
# Note that this is not the system-level rclpy, but one compiled for Omniverse
import rclpy
from rosgraph_msgs.msg import Clock
rclpy.init()
clock_topic = "sim_time"
manual_clock_topic = "manual_time"
# Creating an action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
("PublishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("PublishManualClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
],
og.Controller.Keys.CONNECT: [
# Connecting execution of OnPlaybackTick node to PublishClock to automatically publish each frame
("OnPlaybackTick.outputs:tick", "PublishClock.inputs:execIn"),
# Connecting execution of OnImpulseEvent node to PublishManualClock so it will only publish when an impulse event is triggered
("OnImpulseEvent.outputs:execOut", "PublishManualClock.inputs:execIn"),
# Connecting simulationTime data of ReadSimTime to the clock publisher nodes
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishManualClock.inputs:timeStamp"),
],
og.Controller.Keys.SET_VALUES: [
# Assigning topic names to clock publishers
("PublishClock.inputs:topicName", clock_topic),
("PublishManualClock.inputs:topicName", manual_clock_topic),
],
},
)
except Exception as e:
print(e)
simulation_app.update()
simulation_app.update()
# Define ROS2 callbacks
def sim_clock_callback(data):
print("sim time:", data.clock)
def manual_clock_callback(data):
print("manual stepped sim time:", data.clock)
# Create rclpy node
node = rclpy.create_node("isaac_sim_clock")
# create subscribers
sim_clock_sub = node.create_subscription(Clock, clock_topic, sim_clock_callback, 1)
manual_clock_sub = node.create_subscription(Clock, manual_clock_topic, manual_clock_callback, 1)
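# With the simulation stepping below, the published clocks can also be checked
# externally with: ros2 topic echo /sim_time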
time.sleep(1.0)
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
# perform a fixed number of steps with fixed step size
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_context.render() # This updates rendering/app loop which calls the sim clock
simulation_context.step(render=False) # runs with a non-realtime clock
rclpy.spin_once(node, timeout_sec=0.0) # Spin node once
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# perform a fixed number of steps with realtime clock
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_app.update() # runs with a realtime clock
rclpy.spin_once(node, timeout_sec=0.0) # Spin node once
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# shutdown
rclpy.shutdown()
simulation_context.stop()
simulation_app.close()
| 4,822 | Python | 37.584 | 142 | 0.710908 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/carter_stereo.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser(description="Carter Stereo Example")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
# Example ROS2 bridge sample showing manual control over messages
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.nucleus import get_assets_root_path
from pxr import Sdf
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
# Locate assets root folder to load sample
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
exit()
usd_path = assets_root_path + "/Isaac/Samples/ROS2/Scenario/carter_warehouse_navigation.usd"
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
simulation_app.update()
simulation_app.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
simulation_app.update()
print("Loading Complete")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
ros_cameras_graph_path = "/World/Nova_Carter_ROS/front_hawk"
# Enabling rgb image publishers for left camera. Cameras will automatically publish images each frame
og.Controller.set(og.Controller.attribute(ros_cameras_graph_path + "/left_camera_render_product.inputs:enabled"), True)
simulation_context.play()
simulation_context.step()
# Enabling rgb image publishers for right camera after left cameras are initialized. Cameras will automatically publish images each frame
og.Controller.set(og.Controller.attribute(ros_cameras_graph_path + "/right_camera_render_product.inputs:enabled"), True)
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
simulation_context.step()
# Create a ROS publisher to publish messages that spin the robot in place
# If a system-level rclpy is sourced in bashrc or the terminal, it is imported; otherwise the backup rclpy libraries shipped with Isaac Sim are used
import rclpy
rclpy.init()
from geometry_msgs.msg import Twist
node = rclpy.create_node("carter_stereo")
publisher = node.create_publisher(Twist, "cmd_vel", 10)
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Publish the ROS Twist message every 2 frames
if frame % 2 == 0:
message = Twist()
message.angular.z = 0.5 # spin in place
publisher.publish(message)
if args.test and frame > 120:
break
frame = frame + 1
node.destroy_node()
rclpy.shutdown()
simulation_context.stop()
simulation_app.close()
| 3,363 | Python | 32.64 | 137 | 0.765685 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/camera_manual.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
from omni.isaac.kit import SimulationApp
CAMERA_STAGE_PATH = "/Camera"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS2 bridge sample demonstrating the manual loading of stages and manual publishing of images
simulation_app = SimulationApp(CONFIG)
import omni
import omni.graph.core as og
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom
# enable ROS2 bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Loading the warehouse_with_forklifts environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(CAMERA_STAGE_PATH, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(-1, 5, 1))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
{
"graph_path": ROS_CAMERA_GRAPH_PATH,
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
("cameraHelperRgb", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperInfo", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperDepth", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "createViewport.inputs:execIn"),
("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"),
("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"),
("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"),
("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
],
keys.SET_VALUES: [
("createViewport.inputs:viewportId", 0),
("cameraHelperRgb.inputs:frameId", "sim_camera"),
("cameraHelperRgb.inputs:topicName", "rgb"),
("cameraHelperRgb.inputs:type", "rgb"),
("cameraHelperInfo.inputs:frameId", "sim_camera"),
("cameraHelperInfo.inputs:topicName", "camera_info"),
("cameraHelperInfo.inputs:type", "camera_info"),
("cameraHelperDepth.inputs:frameId", "sim_camera"),
("cameraHelperDepth.inputs:topicName", "depth"),
("cameraHelperDepth.inputs:type", "depth"),
("setCamera.inputs:cameraPrim", [usdrt.Sdf.Path(CAMERA_STAGE_PATH)]),
],
},
)
# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)
simulation_app.update()
# Use the IsaacSimulationGate step value to block execution on specific frames
SD_GRAPH_PATH = "/Render/PostProcess/SDGPipeline"
viewport_api = get_active_viewport()
if viewport_api is not None:
import omni.syntheticdata._syntheticdata as sd
curr_stage = omni.usd.get_context().get_stage()
# Required for editing the SDGPipeline graph which exists in the Session Layer
with Usd.EditContext(curr_stage, curr_stage.GetSessionLayer()):
# Get name of rendervar for RGB sensor type
rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
# Get path to IsaacSimulationGate node in RGB pipeline
rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_rgb + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
sd.SensorType.DistanceToImagePlane.name
)
# Get path to IsaacSimulationGate node in Depth pipeline
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_depth + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Get path to IsaacSimulationGate node in CameraInfo pipeline
camera_info_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
"PostProcessDispatch" + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
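# A gate with step N lets its branch execute every Nth frame; setting step to 0 blocks
# the branch, which is how publishing is throttled manually in the loop below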
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
frame = 0
while simulation_app.is_running() and simulation_context.is_playing():
# Run with a fixed step size
simulation_context.step(render=True)
if simulation_context.is_playing():
# Rotate camera by 0.25 degrees every frame
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
# Set the step value for the simulation gates to zero to stop execution
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(0)
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(0)
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(0)
# Publish the ROS rgb image message every 5 frames
if frame % 5 == 0:
# Enable rgb Branch node to start publishing rgb image
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(1)
# Publish the ROS Depth image message every 60 frames
if frame % 60 == 0:
# Enable depth Branch node to start publishing depth image
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(1)
# Publish the ROS Camera Info message every frame
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(1)
frame = frame + 1
simulation_context.stop()
simulation_app.close()
| 8,054 | Python | 42.074866 | 106 | 0.706109 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/camera_periodic.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
from omni.isaac.kit import SimulationApp
CAMERA_STAGE_PATH = "/Camera"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
simulation_app = SimulationApp(CONFIG)
import omni
import omni.graph.core as og
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom
# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Loading the warehouse_with_forklifts environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(CAMERA_STAGE_PATH, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(-1, 5, 1))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
{
"graph_path": ROS_CAMERA_GRAPH_PATH,
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
("cameraHelperRgb", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperInfo", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperDepth", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "createViewport.inputs:execIn"),
("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"),
("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"),
("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"),
("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
],
keys.SET_VALUES: [
("createViewport.inputs:viewportId", 0),
("cameraHelperRgb.inputs:frameId", "sim_camera"),
("cameraHelperRgb.inputs:topicName", "rgb"),
("cameraHelperRgb.inputs:type", "rgb"),
("cameraHelperInfo.inputs:frameId", "sim_camera"),
("cameraHelperInfo.inputs:topicName", "camera_info"),
("cameraHelperInfo.inputs:type", "camera_info"),
("cameraHelperDepth.inputs:frameId", "sim_camera"),
("cameraHelperDepth.inputs:topicName", "depth"),
("cameraHelperDepth.inputs:type", "depth"),
("setCamera.inputs:cameraPrim", [usdrt.Sdf.Path(CAMERA_STAGE_PATH)]),
],
},
)
# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)
simulation_app.update()
# Inside the SDGPipeline graph, Isaac Simulation Gate nodes are added to control the execution rate of each of the ROS image and camera info publishers.
# By default the step input of each Isaac Simulation Gate node is set to a value of 1 to execute every frame.
# We can change this value to N for each Isaac Simulation Gate node individually to publish every N number of frames.
viewport_api = get_active_viewport()
if viewport_api is not None:
import omni.syntheticdata._syntheticdata as sd
# Get name of rendervar for RGB sensor type
rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
# Get path to IsaacSimulationGate node in RGB pipeline
rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_rgb + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Get name of rendervar for DistanceToImagePlane sensor type
rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
sd.SensorType.DistanceToImagePlane.name
)
# Get path to IsaacSimulationGate node in Depth pipeline
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_depth + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Get path to IsaacSimulationGate node in CameraInfo pipeline
camera_info_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
"PostProcessDispatch" + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Set Rgb execution step to 5 frames
rgb_step_size = 5
# Set Depth execution step to 60 frames
depth_step_size = 60
# Set Camera info execution step to every frame
info_step_size = 1
# Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(rgb_step_size)
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(depth_step_size)
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(info_step_size)
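# At a 60 FPS render rate this yields roughly 12 Hz rgb, 1 Hz depth, and 60 Hz
# camera_info (verify with: ros2 topic hz rgb)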
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
if simulation_context.is_playing():
# Rotate camera by 0.25 degrees every frame
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
frame = frame + 1
simulation_context.stop()
simulation_app.close()
| 7,608 | Python | 41.747191 | 152 | 0.720294 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/moveit.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import sys
import carb
import numpy as np
from omni.isaac.kit import SimulationApp
FRANKA_STAGE_PATH = "/Franka"
FRANKA_USD_PATH = "/Isaac/Robots/Franka/franka_alt_fingers.usd"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Room/simple_room.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS2 bridge sample demonstrating the manual loading of stages
# and creation of ROS components
simulation_app = SimulationApp(CONFIG)
import omni.graph.core as og
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, prims, rotations, stage, viewports
from pxr import Gf
# enable ROS2 bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Preparing stage
viewports.set_camera_view(eye=np.array([1.2, 1.2, 0.8]), target=np.array([0, 0, 0.5]))
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Loading the franka robot USD
prims.create_prim(
FRANKA_STAGE_PATH,
"Xform",
position=np.array([0, -0.64, 0]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(0, 0, 1), 90)),
usd_path=assets_root_path + FRANKA_USD_PATH,
)
simulation_app.update()
# Creating an action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("Context", "omni.isaac.ros2_bridge.ROS2Context"),
("PublishJointState", "omni.isaac.ros2_bridge.ROS2PublishJointState"),
("SubscribeJointState", "omni.isaac.ros2_bridge.ROS2SubscribeJointState"),
("ArticulationController", "omni.isaac.core_nodes.IsaacArticulationController"),
("PublishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
],
og.Controller.Keys.CONNECT: [
("OnImpulseEvent.outputs:execOut", "PublishJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "SubscribeJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "PublishClock.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "ArticulationController.inputs:execIn"),
("Context.outputs:context", "PublishJointState.inputs:context"),
("Context.outputs:context", "SubscribeJointState.inputs:context"),
("Context.outputs:context", "PublishClock.inputs:context"),
("ReadSimTime.outputs:simulationTime", "PublishJointState.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("SubscribeJointState.outputs:jointNames", "ArticulationController.inputs:jointNames"),
(
"SubscribeJointState.outputs:positionCommand",
"ArticulationController.inputs:positionCommand",
),
(
"SubscribeJointState.outputs:velocityCommand",
"ArticulationController.inputs:velocityCommand",
),
("SubscribeJointState.outputs:effortCommand", "ArticulationController.inputs:effortCommand"),
],
og.Controller.Keys.SET_VALUES: [
# Setting the /Franka target prim to Articulation Controller node
("ArticulationController.inputs:usePath", True),
("ArticulationController.inputs:robotPath", FRANKA_STAGE_PATH),
("PublishJointState.inputs:topicName", "isaac_joint_states"),
("SubscribeJointState.inputs:topicName", "isaac_joint_commands"),
("PublishJointState.inputs:targetPrim", [usdrt.Sdf.Path(FRANKA_STAGE_PATH)]),
("PublishTF.inputs:targetPrims", [usdrt.Sdf.Path(FRANKA_STAGE_PATH)]),
],
},
)
except Exception as e:
print(e)
simulation_app.update()
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
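# With the graph ticking, joint commands can be sent externally, e.g. (assuming a
# sourced ROS 2 environment and the default Franka joint names):
# ros2 topic pub --once /isaac_joint_commands sensor_msgs/msg/JointState "{name: [panda_joint2], position: [-0.5]}"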
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Tick the Publish/Subscribe JointState and Publish Clock nodes each frame
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_context.stop()
simulation_app.close()
| 5,442 | Python | 41.523437 | 109 | 0.684491 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros2_bridge/subscriber.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
from omni.isaac.core import World
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.core.utils.extensions import enable_extension
# enable ROS2 bridge extension
enable_extension("omni.isaac.ros2_bridge")
simulation_app.update()
import time
# Note that this is not the system-level rclpy, but one compiled for Omniverse
import numpy as np
import rclpy
from rclpy.node import Node
from std_msgs.msg import Empty
class Subscriber(Node):
def __init__(self):
super().__init__("tutorial_subscriber")
# setting up the world with a cube
self.timeline = omni.timeline.get_timeline_interface()
self.ros_world = World(stage_units_in_meters=1.0)
self.ros_world.scene.add_default_ground_plane()
# add a cube in the world
cube_path = "/cube"
self.ros_world.scene.add(
VisualCuboid(prim_path=cube_path, name="cube_1", position=np.array([0, 0, 10]), size=0.2)
)
self._cube_position = np.array([0, 0, 0])
# setup the ROS2 subscriber here
self.ros_sub = self.create_subscription(Empty, "move_cube", self.move_cube_callback, 10)
self.ros_world.reset()
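# Trigger a cube move from a terminal with:
# ros2 topic pub --once /move_cube std_msgs/msg/Empty "{}"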
def move_cube_callback(self, data):
# callback function to set the cube position to a new one upon receiving an (empty) ROS2 message
if self.ros_world.is_playing():
self._cube_position = np.array([np.random.rand() * 0.40, np.random.rand() * 0.40, 0.10])
def run_simulation(self):
self.timeline.play()
while simulation_app.is_running():
self.ros_world.step(render=True)
rclpy.spin_once(self, timeout_sec=0.0)
if self.ros_world.is_playing():
if self.ros_world.current_time_step_index == 0:
self.ros_world.reset()
# the actual setting of the cube pose is done here
self.ros_world.scene.get_object("cube_1").set_world_pose(self._cube_position)
# Cleanup
self.timeline.stop()
self.destroy_node()
simulation_app.close()
if __name__ == "__main__":
rclpy.init()
subscriber = Subscriber()
subscriber.run_simulation()
| 2,787 | Python | 33 | 103 | 0.670255 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/pick_place.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
from omni.isaac.universal_robots.tasks import PickPlace
my_world = World(stage_units_in_meters=1.0)
my_task = PickPlace()
my_world.add_task(my_task)
my_world.reset()
task_params = my_task.get_params()
my_ur10 = my_world.scene.get_object(task_params["robot_name"]["value"])
my_controller = PickPlaceController(name="pick_place_controller", gripper=my_ur10.gripper, robot_articulation=my_ur10)
articulation_controller = my_ur10.get_articulation_controller()
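# PickPlaceController is a multi-phase state machine: forward() returns the joint-space
# ArticulationAction for the current phase, and is_done() turns True once every phase finishes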
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
picking_position=observations[task_params["cube_name"]["value"]]["position"],
placing_position=observations[task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[task_params["robot_name"]["value"]]["joint_positions"],
end_effector_offset=np.array([0, 0, 0.02]),
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,955 | Python | 42.466666 | 118 | 0.724297 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/multiple_tasks.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.dofbot.controllers import PickPlaceController
from omni.isaac.dofbot.tasks import PickPlace
from omni.isaac.franka.controllers.stacking_controller import StackingController as FrankaStackingController
from omni.isaac.franka.tasks import Stacking as FrankaStacking
from omni.isaac.universal_robots.controllers import StackingController as UR10StackingController
from omni.isaac.universal_robots.tasks import Stacking as UR10Stacking
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController
from omni.isaac.wheeled_robots.robots import WheeledRobot
from omni.isaac.wheeled_robots.robots.holonomic_robot_usd_setup import HolonomicRobotUsdSetup
my_world = World(stage_units_in_meters=1.0)
tasks = []
num_of_tasks = 3
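# Each task gets its own world offset so the three robot workspaces do not overlap.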
tasks.append(FrankaStacking(name="task_0", offset=np.array([0, -2, 0])))
my_world.add_task(tasks[-1])
tasks.append(UR10Stacking(name="task_1", offset=np.array([0.5, 0.5, 0])))
my_world.add_task(tasks[-1])
tasks.append(PickPlace(offset=np.array([0, -1, 0])))
my_world.add_task(tasks[-1])
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
kaya_asset_path = assets_root_path + "/Isaac/Robots/Kaya/kaya.usd"
my_kaya = my_world.scene.add(
WheeledRobot(
prim_path="/World/Kaya",
name="my_kaya",
wheel_dof_names=["axle_0_joint", "axle_1_joint", "axle_2_joint"],
create_robot=True,
usd_path=kaya_asset_path,
position=np.array([-1, 0, 0]),
)
)
jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd"
my_jetbot = my_world.scene.add(
WheeledRobot(
prim_path="/World/Jetbot",
name="my_jetbot",
wheel_dof_names=["left_wheel_joint", "right_wheel_joint"],
create_robot=True,
usd_path=jetbot_asset_path,
position=np.array([-1.5, -1.5, 0]),
)
)
my_world.reset()
robots = []
for i in range(num_of_tasks):
task_params = tasks[i].get_params()
robots.append(my_world.scene.get_object(task_params["robot_name"]["value"]))
controllers = []
controllers.append(
FrankaStackingController(
name="pick_place_controller",
gripper=robots[0].gripper,
robot_articulation=robots[0],
picking_order_cube_names=tasks[0].get_cube_names(),
robot_observation_name=robots[0].name,
)
)
controllers[-1].reset()
controllers.append(
UR10StackingController(
name="pick_place_controller",
gripper=robots[1].gripper,
robot_articulation=robots[1],
picking_order_cube_names=tasks[1].get_cube_names(),
robot_observation_name=robots[1].name,
)
)
controllers[-1].reset()
controllers.append(
PickPlaceController(name="pick_place_controller", gripper=robots[2].gripper, robot_articulation=robots[2])
)
kaya_setup = HolonomicRobotUsdSetup(
robot_prim_path=my_kaya.prim_path, com_prim_path="/World/Kaya/base_link/control_offset"
)
(
wheel_radius,
wheel_positions,
wheel_orientations,
mecanum_angles,
wheel_axis,
up_axis,
) = kaya_setup.get_holonomic_controller_params()
kaya_controller = HolonomicController(
name="holonomic_controller",
wheel_radius=wheel_radius,
wheel_positions=wheel_positions,
wheel_orientations=wheel_orientations,
mecanum_angles=mecanum_angles,
wheel_axis=wheel_axis,
up_axis=up_axis,
)
jetbot_controller = DifferentialController(name="simple_control", wheel_radius=0.03, wheel_base=0.1125)
pick_place_task_params = tasks[2].get_params()
articulation_controllers = []
for i in range(num_of_tasks):
articulation_controllers.append(robots[i].get_articulation_controller())
i = 0
my_world.pause()
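# The world starts paused; once playing, i counts physics steps and the wheeled
# robots cycle through three 500-step motion commands while the manipulators run.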
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
controllers[0].reset()
controllers[1].reset()
controllers[2].reset()
kaya_controller.reset()
jetbot_controller.reset()
observations = my_world.get_observations()
actions = controllers[0].forward(observations=observations, end_effector_offset=np.array([0, 0, 0]))
articulation_controllers[0].apply_action(actions)
actions = controllers[1].forward(observations=observations, end_effector_offset=np.array([0, 0, 0.02]))
articulation_controllers[1].apply_action(actions)
actions = controllers[2].forward(
picking_position=observations[pick_place_task_params["cube_name"]["value"]]["position"],
placing_position=observations[pick_place_task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[pick_place_task_params["robot_name"]["value"]]["joint_positions"],
end_effector_offset=np.array([0, -0.06, 0]),
)
articulation_controllers[2].apply_action(actions)
if i >= 0 and i < 500:
my_kaya.apply_wheel_actions(kaya_controller.forward(command=[0.2, 0.0, 0.0]))
my_jetbot.apply_wheel_actions(jetbot_controller.forward(command=[0.1, 0]))
elif i >= 500 and i < 1000:
# TODO: change with new USD
my_kaya.apply_wheel_actions(kaya_controller.forward(command=[0, 0.2, 0.0]))
my_jetbot.apply_wheel_actions(jetbot_controller.forward(command=[0.0, np.pi / 10]))
elif i >= 1000 and i < 1500:
# TODO: change with new USD
my_kaya.apply_wheel_actions(kaya_controller.forward(command=[0, 0.0, 0.6]))
my_jetbot.apply_wheel_actions(jetbot_controller.forward(command=[0.1, 0]))
i += 1
simulation_app.close()
| 6,457 | Python | 38.619632 | 115 | 0.696144 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/stacking.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.universal_robots.controllers import StackingController
from omni.isaac.universal_robots.tasks import Stacking
my_world = World(stage_units_in_meters=1.0)
my_task = Stacking()
my_world.add_task(my_task)
my_world.reset()
robot_name = my_task.get_params()["robot_name"]["value"]
my_ur10 = my_world.scene.get_object(robot_name)
my_controller = StackingController(
name="stacking_controller",
gripper=my_ur10.gripper,
robot_articulation=my_ur10,
picking_order_cube_names=my_task.get_cube_names(),
robot_observation_name=robot_name,
)
articulation_controller = my_ur10.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(observations=observations, end_effector_offset=np.array([0.0, 0.0, 0.02]))
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,669 | Python | 36.11111 | 114 | 0.745956 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/follow_target_with_rmpflow.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core import World
from omni.isaac.universal_robots.controllers.rmpflow_controller import RMPFlowController
from omni.isaac.universal_robots.tasks import FollowTarget
my_world = World(stage_units_in_meters=1.0)
my_task = FollowTarget(name="follow_target_task", attach_gripper=True)
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("follow_target_task").get_params()
ur10_name = task_params["robot_name"]["value"]
target_name = task_params["target_name"]["value"]
my_ur10 = my_world.scene.get_object(ur10_name)
my_controller = RMPFlowController(name="target_follower_controller", robot_articulation=my_ur10, attach_gripper=True)
articulation_controller = my_ur10.get_articulation_controller()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
target_end_effector_position=observations[target_name]["position"],
target_end_effector_orientation=observations[target_name]["orientation"],
)
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,816 | Python | 43.317072 | 117 | 0.749449 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/follow_target_with_ik.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
from omni.isaac.core import World
from omni.isaac.universal_robots import KinematicsSolver
from omni.isaac.universal_robots.tasks import FollowTarget
my_world = World(stage_units_in_meters=1.0)
my_task = FollowTarget(name="follow_target_task", attach_gripper=True)
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("follow_target_task").get_params()
ur10_name = task_params["robot_name"]["value"]
target_name = task_params["target_name"]["value"]
my_ur10 = my_world.scene.get_object(ur10_name)
my_controller = KinematicsSolver(my_ur10, attach_gripper=True)
articulation_controller = my_ur10.get_articulation_controller()
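# Each frame, solve IK toward the measured target pose; the action is applied only
# when the solver reports success, otherwise a warning is logged and the arm holds.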
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
observations = my_world.get_observations()
actions, succ = my_controller.compute_inverse_kinematics(
target_position=observations[target_name]["position"],
target_orientation=observations[target_name]["orientation"],
)
if succ:
articulation_controller.apply_action(actions)
else:
carb.log_warn("IK did not converge to a solution. No action is being taken.")
simulation_app.close()
| 1,832 | Python | 40.65909 | 90 | 0.734716 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/pick_place2.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.collisions import ray_cast
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
from omni.isaac.universal_robots.tasks import BinFilling
my_world = World(stage_units_in_meters=1.0)
my_task = BinFilling()
my_world.add_task(my_task)
my_world.reset()
task_params = my_task.get_params()
my_ur10 = my_world.scene.get_object(task_params["robot_name"]["value"])
my_controller = PickPlaceController(name="pick_place_controller", gripper=my_ur10.gripper, robot_articulation=my_ur10)
articulation_controller = my_ur10.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
picking_position=observations[task_params["bin_name"]["value"]]["position"],
placing_position=observations[task_params["bin_name"]["value"]]["target_position"],
current_joint_positions=observations[task_params["robot_name"]["value"]]["joint_positions"],
# end_effector_offset=np.array([0, 0, -0.075])
end_effector_offset=np.array([0, -0.098, 0.03]),
end_effector_orientation=euler_angles_to_quat(np.array([np.pi, 0, np.pi / 2.0])),
)
if my_controller.get_current_event() > 2 and my_controller.get_current_event() < 6:
print(
ray_cast(
position=observations[task_params["robot_name"]["value"]]["end_effector_position"],
orientation=observations[task_params["robot_name"]["value"]]["end_effector_orientation"],
offset=np.array([0.162, 0, 0]),
)
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
simulation_app.close()
| 2,667 | Python | 45.807017 | 118 | 0.685789 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.universal_robots/bin_filling.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.isaac.universal_robots.controllers.pick_place_controller import PickPlaceController
from omni.isaac.universal_robots.tasks import BinFilling
my_world = World(stage_units_in_meters=1.0)
my_task = BinFilling()
my_world.add_task(my_task)
my_world.reset()
task_params = my_task.get_params()
my_ur10 = my_world.scene.get_object(task_params["robot_name"]["value"])
my_controller = PickPlaceController(name="pick_place_controller", gripper=my_ur10.gripper, robot_articulation=my_ur10)
articulation_controller = my_ur10.get_articulation_controller()
i = 0
added_screws = False
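# add_screws is guarded so the screws are spawned only once, when the controller
# reaches event 6 (bin held over the fill station) and is paused.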
while simulation_app.is_running():
if my_world.is_playing():
my_world.step(render=True)
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
added_screws = False
observations = my_world.get_observations()
actions = my_controller.forward(
picking_position=observations[task_params["bin_name"]["value"]]["position"],
placing_position=observations[task_params["bin_name"]["value"]]["target_position"],
current_joint_positions=observations[task_params["robot_name"]["value"]]["joint_positions"],
end_effector_offset=np.array([0, -0.098, 0.03]),
end_effector_orientation=euler_angles_to_quat(np.array([np.pi, 0, np.pi / 2.0])),
)
if not added_screws and my_controller.get_current_event() == 6 and not my_controller.is_paused():
my_controller.pause()
my_task.add_screws(screws_number=20)
added_screws = True
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
else:
my_world.render()
simulation_app.close()
| 2,434 | Python | 43.272726 | 118 | 0.70378 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.kaya/kaya_move.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController
from omni.isaac.wheeled_robots.robots import WheeledRobot
from omni.isaac.wheeled_robots.robots.holonomic_robot_usd_setup import HolonomicRobotUsdSetup
my_world = World(stage_units_in_meters=1.0)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
kaya_asset_path = assets_root_path + "/Isaac/Robots/Kaya/kaya.usd"
my_kaya = my_world.scene.add(
WheeledRobot(
prim_path="/World/Kaya",
name="my_kaya",
wheel_dof_names=["axle_0_joint", "axle_1_joint", "axle_2_joint"],
create_robot=True,
usd_path=kaya_asset_path,
position=np.array([0, 0.0, 0.02]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
my_world.scene.add_default_ground_plane()
kaya_setup = HolonomicRobotUsdSetup(
robot_prim_path=my_kaya.prim_path, com_prim_path="/World/Kaya/base_link/control_offset"
)
(
wheel_radius,
wheel_positions,
wheel_orientations,
mecanum_angles,
wheel_axis,
up_axis,
) = kaya_setup.get_holonomic_controller_params()
my_controller = HolonomicController(
name="holonomic_controller",
wheel_radius=wheel_radius,
wheel_positions=wheel_positions,
wheel_orientations=wheel_orientations,
mecanum_angles=mecanum_angles,
wheel_axis=wheel_axis,
up_axis=up_axis,
)
my_world.reset()
i = 0
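# Drive the holonomic base through a repeating sequence: forward for 500 steps,
# sideways for 500, rotate in place for 200, then restart the cycle.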
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
if i >= 0 and i < 500:
my_kaya.apply_wheel_actions(my_controller.forward(command=[0.4, 0.0, 0.0]))
elif i >= 500 and i < 1000:
my_kaya.apply_wheel_actions(my_controller.forward(command=[0.0, 0.4, 0.0]))
elif i >= 1000 and i < 1200:
my_kaya.apply_wheel_actions(my_controller.forward(command=[0.0, 0.0, 0.05]))
elif i == 1200:
i = 0
i += 1
simulation_app.close()
| 2,828 | Python | 33.084337 | 93 | 0.694484 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/pick_place.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.franka.controllers.pick_place_controller import PickPlaceController
from omni.isaac.franka.tasks import PickPlace
my_world = World(stage_units_in_meters=1.0)
my_task = PickPlace()
my_world.add_task(my_task)
my_world.reset()
task_params = my_task.get_params()
my_franka = my_world.scene.get_object(task_params["robot_name"]["value"])
my_controller = PickPlaceController(
name="pick_place_controller", gripper=my_franka.gripper, robot_articulation=my_franka
)
articulation_controller = my_franka.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
picking_position=observations[task_params["cube_name"]["value"]]["position"],
placing_position=observations[task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[task_params["robot_name"]["value"]]["joint_positions"],
end_effector_offset=np.array([0, 0.005, 0]),
)
if my_controller.is_done():
print("done picking and placing")
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,950 | Python | 40.510637 | 104 | 0.721538 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/multiple_tasks.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core import World
from omni.isaac.franka.controllers.pick_place_controller import PickPlaceController
from omni.isaac.franka.tasks import PickPlace
my_world = World(stage_units_in_meters=1.0)
tasks = []
num_of_tasks = 2
for i in range(num_of_tasks):
tasks.append(PickPlace(name="task" + str(i), offset=np.array([0, (i * 2) - 3, 0])))
my_world.add_task(tasks[-1])
my_world.reset()
frankas = []
cube_names = []
for i in range(num_of_tasks):
task_params = tasks[i].get_params()
frankas.append(my_world.scene.get_object(task_params["robot_name"]["value"]))
cube_names.append(task_params["cube_name"]["value"])
controllers = []
for i in range(num_of_tasks):
controllers.append(
PickPlaceController(name="pick_place_controller", gripper=frankas[i].gripper, robot_articulation=frankas[i])
)
controllers[-1].reset()
articulation_controllers = []
for i in range(num_of_tasks):
articulation_controllers.append(frankas[i].get_articulation_controller())
my_world.pause()
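# The world starts paused; press play to run both pick-and-place tasks in parallel.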
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
for i in range(num_of_tasks):
controllers[i].reset()
observations = my_world.get_observations()
        for i in range(num_of_tasks):
actions = controllers[i].forward(
picking_position=observations[cube_names[i]]["position"],
placing_position=observations[cube_names[i]]["target_position"],
current_joint_positions=observations[frankas[i].name]["joint_positions"],
end_effector_offset=np.array([0, 0, 0]),
)
articulation_controllers[i].apply_action(actions)
simulation_app.close()
| 2,447 | Python | 37.857142 | 116 | 0.691868 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/stacking.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core import World
from omni.isaac.franka.controllers.stacking_controller import StackingController
from omni.isaac.franka.tasks import Stacking
my_world = World(stage_units_in_meters=1.0)
my_task = Stacking()
my_world.add_task(my_task)
my_world.reset()
robot_name = my_task.get_params()["robot_name"]["value"]
my_franka = my_world.scene.get_object(robot_name)
my_controller = StackingController(
name="stacking_controller",
gripper=my_franka.gripper,
robot_articulation=my_franka,
picking_order_cube_names=my_task.get_cube_names(),
robot_observation_name=robot_name,
)
articulation_controller = my_franka.get_articulation_controller()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(observations=observations)
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,610 | Python | 35.613636 | 80 | 0.749689 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/follow_target_with_rmpflow.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.core import World
from omni.isaac.franka.controllers.rmpflow_controller import RMPFlowController
from omni.isaac.franka.tasks import FollowTarget
my_world = World(stage_units_in_meters=1.0)
my_task = FollowTarget(name="follow_target_task")
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("follow_target_task").get_params()
franka_name = task_params["robot_name"]["value"]
target_name = task_params["target_name"]["value"]
my_franka = my_world.scene.get_object(franka_name)
my_controller = RMPFlowController(name="target_follower_controller", robot_articulation=my_franka)
articulation_controller = my_franka.get_articulation_controller()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
observations = my_world.get_observations()
actions = my_controller.forward(
target_end_effector_position=observations[target_name]["position"],
target_end_effector_orientation=observations[target_name]["orientation"],
)
articulation_controller.apply_action(actions)
simulation_app.close()
| 1,764 | Python | 42.048779 | 98 | 0.747732 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/follow_target_with_ik.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
from omni.isaac.core import World
from omni.isaac.franka import KinematicsSolver
from omni.isaac.franka.controllers.rmpflow_controller import RMPFlowController
from omni.isaac.franka.tasks import FollowTarget
my_world = World(stage_units_in_meters=1.0)
my_task = FollowTarget(name="follow_target_task")
my_world.add_task(my_task)
my_world.reset()
task_params = my_world.get_task("follow_target_task").get_params()
franka_name = task_params["robot_name"]["value"]
target_name = task_params["target_name"]["value"]
my_franka = my_world.scene.get_object(franka_name)
my_controller = KinematicsSolver(my_franka)
articulation_controller = my_franka.get_articulation_controller()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
observations = my_world.get_observations()
actions, succ = my_controller.compute_inverse_kinematics(
target_position=observations[target_name]["position"],
target_orientation=observations[target_name]["orientation"],
)
if succ:
articulation_controller.apply_action(actions)
else:
carb.log_warn("IK did not converge to a solution. No action is being taken.")
simulation_app.close()
| 1,859 | Python | 40.333332 | 90 | 0.739107 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.franka/franka_gripper.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
from omni.isaac.core import World
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.franka import Franka
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
my_world = World(stage_units_in_meters=1.0)
my_franka = my_world.scene.add(Franka(prim_path="/World/Franka", name="my_franka"))
my_world.scene.add_default_ground_plane()
my_world.reset()
i = 0
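# Gripper exercise loop: drive both finger joints closed by 0.005 per step for the
# first 500 steps, then back open, restarting the cycle every 1000 steps.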
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
i += 1
gripper_positions = my_franka.gripper.get_joint_positions()
if i < 500:
my_franka.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] - (0.005), gripper_positions[1] - (0.005)])
)
if i > 500:
my_franka.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] + (0.005), gripper_positions[1] + (0.005)])
)
if i == 1000:
i = 0
if args.test is True:
break
simulation_app.close()
| 1,791 | Python | 33.461538 | 116 | 0.686209 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.debug_draw/rtx_radar.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
# Example for creating an RTX radar sensor and drawing its point cloud data
simulation_app = SimulationApp({"headless": False})
import omni
import omni.kit.viewport.utility
import omni.replicator.core as rep
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import nucleus, stage
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Gf
# enable the debug draw extension
enable_extension("omni.isaac.debug_draw")
simulation_app.update()
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
simulation_app.update()
# Load the full warehouse environment
stage.add_reference_to_stage(
assets_root_path + "/Isaac/Environments/Simple_Warehouse/full_warehouse.usd", "/background"
)
simulation_app.update()
radar_config = "Example"
if len(sys.argv) == 2:
radar_config = sys.argv[1]
# Create the radar sensor that generates data into "RtxSensorCpu"
# Sensor needs to be rotated 90 degrees about X so that its Z axis points up
# Drive Sim applies 0.5,-0.5,-0.5,w(-0.5); we have to apply the reverse
_, sensor = omni.kit.commands.execute(
"IsaacSensorCreateRtxRadar",
path="/sensor",
parent=None,
config=radar_config,
translation=(-0.937, 1.745, 0.8940),
orientation=Gf.Quatd(0.70711, 0.70711, 0, 0), # Gf.Quatd is w,i,j,k
)
hydra_texture = rep.create.render_product(sensor.GetPath(), [1, 1], name="Isaac")
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
simulation_app.update()
# Create the debug draw pipeline in the post process graph
writer = rep.writers.get("RtxRadar" + "DebugDrawPointCloud")
writer.attach([hydra_texture])
simulation_app.update()
simulation_context.play()
while simulation_app.is_running():
simulation_app.update()
# cleanup and shutdown
simulation_context.stop()
simulation_app.close()
| 2,596 | Python | 31.873417 | 113 | 0.758089 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.debug_draw/rtx_lidar.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
# Example for creating an RTX lidar sensor and drawing its point cloud data
simulation_app = SimulationApp({"headless": False})
import omni
import omni.kit.viewport.utility
import omni.replicator.core as rep
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import nucleus, stage
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Gf
# enable the debug draw extension
enable_extension("omni.isaac.debug_draw")
simulation_app.update()
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
simulation_app.update()
# Load the full warehouse environment
stage.add_reference_to_stage(
assets_root_path + "/Isaac/Environments/Simple_Warehouse/full_warehouse.usd", "/background"
)
simulation_app.update()
lidar_config = "Example_Rotary"
if len(sys.argv) == 2:
lidar_config = sys.argv[1]
# Create the lidar sensor that generates data into "RtxSensorCpu"
# Sensor needs to be rotated 90 degrees about X so that its Z axis points up
# Possible options are Example_Rotary and Example_Solid_State
# Drive Sim applies 0.5,-0.5,-0.5,w(-0.5); we have to apply the reverse
_, sensor = omni.kit.commands.execute(
"IsaacSensorCreateRtxLidar",
path="/sensor",
parent=None,
config=lidar_config,
translation=(0, 0, 1.0),
orientation=Gf.Quatd(1.0, 0.0, 0.0, 0.0), # Gf.Quatd is w,i,j,k
)
hydra_texture = rep.create.render_product(sensor.GetPath(), [1, 1], name="Isaac")
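# The render product is only 1x1: the lidar returns data through the RtxSensorCpu
# buffer rather than a camera image, so no pixel resolution is needed.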
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
simulation_app.update()
# Create the debug draw pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "DebugDrawPointCloud" + "Buffer")
writer.attach([hydra_texture])
simulation_app.update()
simulation_context.play()
while simulation_app.is_running():
simulation_app.update()
# cleanup and shutdown
simulation_context.stop()
simulation_app.close()
| 2,599 | Python | 31.5 | 113 | 0.75606 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_command_api_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import time
import numpy as np
import omni
from omni.isaac.core.objects import DynamicCuboid, VisualCuboid
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.df import DfNetwork, DfState, DfStateMachineDecider, DfStateSequence
from omni.isaac.cortex.dfb import DfBasicContext
from omni.isaac.cortex.robot import add_franka_to_stage
class NullspaceShiftState(DfState):
def __init__(self):
super().__init__()
self.config_mean = np.array([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75])
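        # Mean arm configuration; enter() perturbs it with Gaussian noise to sample
        # new posture configs while the end-effector target stays fixed.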
self.target_p = np.array([0.7, 0.0, 0.5])
self.construction_time = time.time()
def enter(self):
# Change the posture configuration while maintaining a consistent target.
posture_config = self.config_mean + np.random.randn(7)
self.context.robot.arm.send_end_effector(target_position=self.target_p, posture_config=posture_config)
self.entry_time = time.time()
# Close the gripper if open and open the gripper if closed. It closes more quickly than it
# opens.
gripper = self.context.robot.gripper
if gripper.get_width() > 0.05:
gripper.close(speed=0.5)
else:
gripper.open(speed=0.1)
print("[%f] <enter> sampling posture config" % (self.entry_time - self.construction_time))
def step(self):
if time.time() - self.entry_time < 2.0:
return self
return None
def main():
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/franka"))
world.scene.add_default_ground_plane()
decider_network = DfNetwork(
DfStateMachineDecider(DfStateSequence([NullspaceShiftState()], loop=True)), context=DfBasicContext(robot)
)
world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,443 | Python | 33.422535 | 113 | 0.695456 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_cortex_sync_belief_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("example_cortex_sync_belief")
parser.add_argument(
"--behavior",
type=str,
default=None,
help="Which behavior to run. See behavior/franka for available behavior files. By default, it launches no behavior.",
)
parser.add_argument(
"--auto_sync_objects", action="store_true", help="Automatically sync the objects with their measured poses."
)
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from behaviors.franka.franka_behaviors import ContextStateMonitor, behaviors
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.cortex.cortex_object import CortexObject
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import add_franka_to_stage
enable_extension("omni.isaac.cortex_sync")
from omni.isaac.cortex_sync.cortex_ros import CortexControlRos, CortexObjectsRos, cortex_init_ros_node
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
cortex_init_ros_node("example_cortex_sync_belief")
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
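    # cube edge length in meters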
cortex_objects = {}
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
cortex_objects[spec.name] = CortexObject(obj)
robot.register_obstacle(cortex_objects[spec.name])
world.scene.add_default_ground_plane()
cortex_control = CortexControlRos(robot)
cortex_objects_ros = CortexObjectsRos(cortex_objects, auto_sync_objects=args.auto_sync_objects)
decider_network = None
context_monitor = ContextStateMonitor(print_dt=0.25)
if args.behavior in behaviors:
decider_network = behaviors[args.behavior].make_decider_network(robot)
elif args.behavior is not None:
decider_network = load_behavior_module(args.behavior).make_decider_network(robot)
    if decider_network:
        decider_network.context.add_monitor(context_monitor.monitor)
        world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 3,330 | Python | 34.43617 | 121 | 0.694895 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/follow_example_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core.objects import VisualSphere
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.df import DfNetwork, DfState, DfStateMachineDecider
from omni.isaac.cortex.dfb import DfBasicContext
from omni.isaac.cortex.robot import add_franka_to_stage
class FollowState(DfState):
"""The context object is available as self.context. We have access to everything in the context
object, which in this case is everything in the robot object (the command API and the follow
sphere).
"""
@property
def robot(self):
return self.context.robot
@property
def follow_sphere(self):
return self.context.robot.follow_sphere
def enter(self):
self.robot.gripper.close()
self.follow_sphere.set_world_pose(*self.robot.arm.get_fk_pq().as_tuple())
def step(self):
target_position, _ = self.follow_sphere.get_world_pose()
self.robot.arm.send_end_effector(target_position=target_position)
return self # Always transition back to this state.
def main():
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
# Add a sphere to the scene to follow, and store it off in a new member as part of the robot.
robot.follow_sphere = world.scene.add(
VisualSphere(
name="follow_sphere", prim_path="/World/FollowSphere", radius=0.02, color=np.array([0.7, 0.0, 0.7])
)
)
world.scene.add_default_ground_plane()
# Add a simple state machine decider network with the single state defined above. This state
# will be persistently stepped because it always returns itself.
world.add_decider_network(DfNetwork(DfStateMachineDecider(FollowState()), context=DfBasicContext(robot)))
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,441 | Python | 34.911764 | 111 | 0.722655 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_cortex_sync_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("example_cortex_sync")
parser.add_argument(
"--behavior",
type=str,
default="block_stacking_behavior",
help="Which behavior to run. See behavior/franka for available behavior files.",
)
parser.add_argument(
"--auto_sync_objects", action="store_true", help="Automatically sync the objects with their measured poses."
)
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from behaviors.franka.franka_behaviors import ContextStateMonitor, behaviors
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.cortex.cortex_object import CortexObject
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import add_franka_to_stage
enable_extension("omni.isaac.cortex_sync")
from omni.isaac.cortex_sync.cortex_ros import (
CortexControlRos,
CortexObjectsRos,
CortexSimObjectsRos,
CortexSimRobotRos,
cortex_init_ros_node,
)
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
cortex_init_ros_node("example_cortex_sync")
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
sim_prim = XFormPrim(prim_path="/Sim")
sim_prim.set_world_pose(position=np.array([-2.0, 0.0, 0.0]))
sim_robot = world.add_robot(
add_franka_to_stage(name="franka_sim", prim_path="/Sim/Franka", use_motion_commander=False)
)
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
cortex_objects = {}
sim_objects = {}
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
cortex_objects[spec.name] = CortexObject(obj)
robot.register_obstacle(cortex_objects[spec.name])
sim_obj = world.scene.add(
DynamicCuboid(
prim_path="/Sim/Obs/{}".format(spec.name),
name="{}_sim".format(spec.name),
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
sim_objects[spec.name] = sim_obj
world.scene.add_default_ground_plane()
cortex_sim = CortexSimRobotRos(sim_robot)
cortex_sim_objects_ros = CortexSimObjectsRos(sim_objects)
cortex_control = CortexControlRos(robot)
cortex_objects_ros = CortexObjectsRos(cortex_objects, auto_sync_objects=args.auto_sync_objects)
decider_network = None
context_monitor = ContextStateMonitor(print_dt=0.25)
if args.behavior in behaviors:
decider_network = behaviors[args.behavior].make_decider_network(robot)
elif args.behavior is not None:
decider_network = load_behavior_module(args.behavior).make_decider_network(robot)
if decider_network:
decider_network.context.add_monitor(context_monitor.monitor)
world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 4,202 | Python | 33.450819 | 112 | 0.669205 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/demo_ur10_conveyor_main.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import random
import numpy as np
import omni.isaac.cortex.math_util as math_util
import omni.isaac.cortex.sample_behaviors.ur10.bin_stacking_behavior as behavior
from omni.isaac.core.objects import VisualCapsule, VisualSphere
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.cortex.cortex_rigid_prim import CortexRigidPrim
from omni.isaac.cortex.cortex_utils import get_assets_root_path_or_die
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import CortexUr10
class Ur10Assets:
def __init__(self):
self.assets_root_path = get_assets_root_path_or_die()
self.ur10_table_usd = (
self.assets_root_path + "/Isaac/Samples/Leonardo/Stage/ur10_bin_stacking_short_suction.usd"
)
self.small_klt_usd = self.assets_root_path + "/Isaac/Props/KLT_Bin/small_KLT.usd"
self.background_usd = self.assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
self.rubiks_cube_usd = self.assets_root_path + "/Isaac/Props/Rubiks_Cube/rubiks_cube.usd"
def print_diagnostics(diagnostic):
print("=========== logical state ==========")
if diagnostic.bin_name:
print("active bin info:")
print("- bin_obj.name: {}".format(diagnostic.bin_name))
print("- bin_base: {}".format(diagnostic.bin_base))
print("- grasp_T:\n{}".format(diagnostic.grasp))
print("- is_grasp_reached: {}".format(diagnostic.grasp_reached))
print("- is_attached: {}".format(diagnostic.attached))
print("- needs_flip: {}".format(diagnostic.needs_flip))
else:
print("<no active bin>")
print("------------------------------------")
def random_bin_spawn_transform():
x = random.uniform(-0.15, 0.15)
y = 1.5
z = -0.15
position = np.array([x, y, z])
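    # Sample a small random yaw: (w, z) parameterize a rotation about the vertical
    # axis and are normalized into a unit quaternion below.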
z = random.random() * 0.02 - 0.01
w = random.random() * 0.02 - 0.01
norm = np.sqrt(z**2 + w**2)
quat = math_util.Quaternion([w / norm, 0, 0, z / norm])
if random.random() > 0.5:
print("<flip>")
# flip the bin so it's upside down
quat = quat * math_util.Quaternion([0, 0, 1, 0])
else:
print("<no flip>")
return position, quat.vals
class BinStackingTask(BaseTask):
def __init__(self, env_path, assets):
super().__init__("bin_stacking")
self.assets = assets
self.env_path = "/World/Ur10Table"
self.bins = []
self.stashed_bins = []
self.on_conveyor = None
def _spawn_bin(self, rigid_bin):
x, q = random_bin_spawn_transform()
rigid_bin.set_world_pose(position=x, orientation=q)
rigid_bin.set_linear_velocity(np.array([0, -0.30, 0]))
rigid_bin.set_visibility(True)
def post_reset(self) -> None:
if len(self.bins) > 0:
for rigid_bin in self.bins:
self.scene.remove_object(rigid_bin.name)
self.bins.clear()
self.on_conveyor = None
def pre_step(self, time_step_index, simulation_time) -> None:
"""Spawn a new randomly oriented bin if the previous bin has been placed."""
spawn_new = False
if self.on_conveyor is None:
spawn_new = True
else:
(x, y, z), _ = self.on_conveyor.get_world_pose()
is_on_conveyor = y > 0.0 and -0.4 < x and x < 0.4
if not is_on_conveyor:
spawn_new = True
if spawn_new:
name = "bin_{}".format(len(self.bins))
prim_path = self.env_path + "/bins/{}".format(name)
add_reference_to_stage(usd_path=self.assets.small_klt_usd, prim_path=prim_path)
self.on_conveyor = self.scene.add(CortexRigidPrim(name=name, prim_path=prim_path))
self._spawn_bin(self.on_conveyor)
self.bins.append(self.on_conveyor)
def main():
world = CortexWorld()
env_path = "/World/Ur10Table"
ur10_assets = Ur10Assets()
add_reference_to_stage(usd_path=ur10_assets.ur10_table_usd, prim_path=env_path)
add_reference_to_stage(usd_path=ur10_assets.background_usd, prim_path="/World/Background")
background_prim = XFormPrim(
"/World/Background", position=[10.00, 2.00, -1.18180], orientation=[0.7071, 0, 0, 0.7071]
)
robot = world.add_robot(CortexUr10(name="robot", prim_path="{}/ur10".format(env_path)))
obs = world.scene.add(
VisualSphere(
"/World/Ur10Table/Obstacles/FlipStationSphere",
name="flip_station_sphere",
position=np.array([0.73, 0.76, -0.13]),
radius=0.2,
visible=False,
)
)
robot.register_obstacle(obs)
obs = world.scene.add(
VisualSphere(
"/World/Ur10Table/Obstacles/NavigationDome",
name="navigation_dome_obs",
position=[-0.031, -0.018, -1.086],
radius=1.1,
visible=False,
)
)
robot.register_obstacle(obs)
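    # Build an orthonormal frame for the barrier capsule: az is the desired capsule
    # axis, ay completes the right-handed basis, and the rotation matrix is converted
    # to a quaternion for the prim's orientation.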
az = np.array([1.0, 0.0, -0.3])
ax = np.array([0.0, 1.0, 0.0])
ay = np.cross(az, ax)
R = math_util.pack_R(ax, ay, az)
quat = math_util.matrix_to_quat(R)
obs = world.scene.add(
VisualCapsule(
"/World/Ur10Table/Obstacles/NavigationBarrier",
name="navigation_barrier_obs",
position=[0.471, 0.276, -0.463 - 0.1],
orientation=quat,
radius=0.5,
height=0.9,
visible=False,
)
)
robot.register_obstacle(obs)
obs = world.scene.add(
VisualCapsule(
"/World/Ur10Table/Obstacles/NavigationFlipStation",
name="navigation_flip_station_obs",
position=np.array([0.766, 0.755, -0.5]),
radius=0.5,
height=0.5,
visible=False,
)
)
robot.register_obstacle(obs)
world.add_task(BinStackingTask(env_path, ur10_assets))
world.add_decider_network(behavior.make_decider_network(robot, print_diagnostics))
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 6,687 | Python | 33.474227 | 106 | 0.612233 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/franka_examples_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("franka_examples")
parser.add_argument(
"--behavior",
type=str,
default="block_stacking_behavior",
help="Which behavior to run. See behavior/franka for available behavior files.",
)
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from behaviors.franka.franka_behaviors import ContextStateMonitor, behaviors
from omni.isaac.core.objects import DynamicCuboid, VisualCuboid
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import Behavior, CortexWorld, LogicalStateMonitor
from omni.isaac.cortex.robot import add_franka_to_stage
from omni.isaac.cortex.tools import SteadyRate
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
world = CortexWorld()
context_monitor = ContextStateMonitor(print_dt=0.25)
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
position=np.array([x, -0.4, width / 2]),
)
)
robot.register_obstacle(obj)
world.scene.add_default_ground_plane()
print()
print("loading behavior: {}".format(args.behavior))
print()
if args.behavior in behaviors:
decider_network = behaviors[args.behavior].make_decider_network(robot)
else:
decider_network = load_behavior_module(args.behavior).make_decider_network(robot)
decider_network.context.add_monitor(context_monitor.monitor)
world.add_decider_network(decider_network)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,733 | Python | 32.753086 | 90 | 0.678741 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/example_cortex_sync_sim_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser("example_cortex_sync_sim")
args, _ = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.robot import add_franka_to_stage
enable_extension("omni.isaac.cortex_sync")
from omni.isaac.cortex_sync.cortex_ros import CortexSimObjectsRos, CortexSimRobotRos, cortex_init_ros_node
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
def main():
cortex_init_ros_node("example_cortex_sync_sim")
world = CortexWorld()
sim_robot = world.add_robot(
add_franka_to_stage(name="franka_sim", prim_path="/Sim/Franka", use_motion_commander=False)
)
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
sim_objects = {}
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
sim_obj = world.scene.add(
DynamicCuboid(
prim_path="/Sim/Obs/{}".format(spec.name),
name="{}_sim".format(spec.name),
size=width,
color=spec.color,
translation=np.array([x, -0.4, width / 2]),
)
)
sim_objects[spec.name] = sim_obj
world.scene.add_default_ground_plane()
cortex_sim = CortexSimRobotRos(sim_robot)
cortex_sim_objects_ros = CortexSimObjectsRos(sim_objects)
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 2,400 | Python | 31.013333 | 106 | 0.668333 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/follow_example_modified_main.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import numpy as np
from omni.isaac.core.objects import VisualSphere
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.cortex.df import DfNetwork, DfState, DfStateMachineDecider
from omni.isaac.cortex.dfb import DfRobotApiContext
from omni.isaac.cortex.robot import add_franka_to_stage
class FollowState(DfState):
"""The context object is available as self.context. We have access to everything in the context
object, which in this case is everything in the robot object (the command API and the follow
sphere).
"""
@property
def robot(self):
return self.context.robot
@property
def follow_sphere(self):
return self.context.robot.follow_sphere
def enter(self):
self.follow_sphere.set_world_pose(*self.robot.arm.get_fk_pq().as_tuple())
def step(self):
target_position, _ = self.follow_sphere.get_world_pose()
self.robot.arm.send_end_effector(target_position=target_position)
return self # Always transition back to this state.
class FollowContext(DfRobotApiContext):
def __init__(self, robot):
super().__init__(robot)
self.reset()
self.add_monitors(
[FollowContext.monitor_end_effector, FollowContext.monitor_gripper, FollowContext.monitor_diagnostics]
)
def reset(self):
self.is_target_reached = False
def monitor_end_effector(self):
eff_p = self.robot.arm.get_fk_p()
target_p, _ = self.robot.follow_sphere.get_world_pose()
self.is_target_reached = np.linalg.norm(target_p - eff_p) < 0.01
def monitor_gripper(self):
if self.is_target_reached:
self.robot.gripper.close()
else:
self.robot.gripper.open()
def monitor_diagnostics(self):
print("is_target_reached: {}".format(self.is_target_reached))
def main():
world = CortexWorld()
robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
# Add a sphere to the scene to follow, and store it off in a new member as part of the robot.
robot.follow_sphere = world.scene.add(
VisualSphere(
name="follow_sphere", prim_path="/World/FollowSphere", radius=0.02, color=np.array([0.7, 0.0, 0.7])
)
)
world.scene.add_default_ground_plane()
# Add a simple state machine decider network with the single state defined above. This state
# will be persistently stepped because it always returns itself.
world.add_decider_network(DfNetwork(DfStateMachineDecider(FollowState()), context=FollowContext(robot)))
world.run(simulation_app)
simulation_app.close()
if __name__ == "__main__":
main()
| 3,228 | Python | 33.351063 | 114 | 0.696406 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.cortex/behaviors/franka/franka_behaviors.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.cortex.dfb import DfDiagnosticsMonitor
from omni.isaac.cortex.sample_behaviors.franka import (
block_stacking_behavior,
peck_decider_network,
peck_game,
peck_state_machine,
)
from omni.isaac.cortex.sample_behaviors.franka.simple import simple_decider_network, simple_state_machine
behaviors = {
"block_stacking_behavior": block_stacking_behavior,
"peck_decider_network": peck_decider_network,
"peck_game": peck_game,
"peck_state_machine": peck_state_machine,
"simple_decider_network": simple_decider_network,
"simple_state_machine": simple_state_machine,
}
class ContextStateMonitor(DfDiagnosticsMonitor):
"""
State monitor to read the context and pass it to the UI.
    For these behaviors, the context has a `diagnostics_message` attribute that contains the text to be displayed, and
    each behavior implements its own monitor to update it.
"""
def __init__(self, print_dt, diagnostic_fn=None):
super().__init__(print_dt=print_dt)
def print_diagnostics(self, context):
if hasattr(context, "diagnostics_message"):
print("====================================")
print(context.diagnostics_message)
| 1,637 | Python | 37.093022 | 112 | 0.719609 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/jetbot_move.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.robots import WheeledRobot
my_world = World(stage_units_in_meters=1.0)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd"
my_jetbot = my_world.scene.add(
WheeledRobot(
prim_path="/World/Jetbot",
name="my_jetbot",
wheel_dof_names=["left_wheel_joint", "right_wheel_joint"],
create_robot=True,
usd_path=jetbot_asset_path,
position=np.array([0, 0.0, 2.0]),
)
)
my_world.scene.add_default_ground_plane()
my_controller = DifferentialController(name="simple_control", wheel_radius=0.03, wheel_base=0.1125)
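# DifferentialController.forward() takes a command of [linear velocity (m/s), angular velocity (rad/s)]
# and maps it to left/right wheel joint velocities using the wheel radius and wheel base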
my_world.reset()
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
my_controller.reset()
if i >= 0 and i < 1000:
# forward
my_jetbot.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
print(my_jetbot.get_linear_velocity())
elif i >= 1000 and i < 1300:
# rotate
my_jetbot.apply_wheel_actions(my_controller.forward(command=[0.0, np.pi / 12]))
print(my_jetbot.get_angular_velocity())
elif i >= 1300 and i < 2000:
# forward
my_jetbot.apply_wheel_actions(my_controller.forward(command=[0.05, 0]))
elif i == 2000:
i = 0
i += 1
if args.test is True:
break
simulation_app.close()
| 2,568 | Python | 34.680555 | 99 | 0.682243 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/stable_baselines_example/eval.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
from env import JetBotEnv
try:
from stable_baselines3 import PPO
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install stable-baselines3 in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install stable-baselines3 "
)
exit()
policy_path = "./cnn_policy/jetbot_policy.zip"
my_env = JetBotEnv(headless=False)
model = PPO.load(policy_path)
for _ in range(20):
obs, _ = my_env.reset()
done = False
while not done:
actions, _ = model.predict(observation=obs, deterministic=True)
obs, reward, done, truncated, info = my_env.step(actions)
my_env.render()
my_env.close()
| 1,166 | Python | 30.54054 | 190 | 0.731561 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/stable_baselines_example/env.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
import carb
import gymnasium
import numpy as np
from gymnasium import spaces
class JetBotEnv(gymnasium.Env):
metadata = {"render.modes": ["human"]}
def __init__(
self,
skip_frame=1,
physics_dt=1.0 / 60.0,
rendering_dt=1.0 / 60.0,
max_episode_length=256,
seed=0,
headless=True,
) -> None:
from omni.isaac.kit import SimulationApp
self.headless = headless
self._simulation_app = SimulationApp({"headless": self.headless, "anti_aliasing": 0})
self._skip_frame = skip_frame
self._dt = physics_dt * self._skip_frame
self._max_episode_length = max_episode_length
self._steps_after_reset = int(rendering_dt / physics_dt)
from omni.isaac.core import World
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.robots import WheeledRobot
self._my_world = World(physics_dt=physics_dt, rendering_dt=rendering_dt, stage_units_in_meters=1.0)
self._my_world.scene.add_default_ground_plane()
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
return
jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd"
self.jetbot = self._my_world.scene.add(
WheeledRobot(
prim_path="/jetbot",
name="my_jetbot",
wheel_dof_names=["left_wheel_joint", "right_wheel_joint"],
create_robot=True,
usd_path=jetbot_asset_path,
position=np.array([0, 0.0, 0.03]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
self.jetbot_controller = DifferentialController(name="simple_control", wheel_radius=0.0325, wheel_base=0.1125)
self.goal = self._my_world.scene.add(
VisualCuboid(
prim_path="/new_cube_1",
name="visual_cube",
position=np.array([0.60, 0.30, 0.05]),
size=0.1,
color=np.array([1.0, 0, 0]),
)
)
self.seed(seed)
self.reward_range = (-float("inf"), float("inf"))
gymnasium.Env.__init__(self)
self.action_space = spaces.Box(low=-1, high=1.0, shape=(2,), dtype=np.float32)
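        # observation layout (16 floats): base position (3) + orientation quaternion (4)
        # + linear velocity (3) + angular velocity (3) + goal position (3); see get_observations()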
        self.observation_space = spaces.Box(low=-float("inf"), high=float("inf"), shape=(16,), dtype=np.float32)
self.max_velocity = 1
self.max_angular_velocity = math.pi
self.reset_counter = 0
return
def get_dt(self):
return self._dt
def step(self, action):
previous_jetbot_position, _ = self.jetbot.get_world_pose()
        # action: forward velocity, angular velocity, each on [-1, 1]
raw_forward = action[0]
raw_angular = action[1]
# we want to force the jetbot to always drive forward
# so we transform to [0,1]. we also scale by our max velocity
forward = (raw_forward + 1.0) / 2.0
forward_velocity = forward * self.max_velocity
# we scale the angular, but leave it on [-1,1] so the
# jetbot can remain an ambiturner.
angular_velocity = raw_angular * self.max_angular_velocity
# we apply our actions to the jetbot
for i in range(self._skip_frame):
self.jetbot.apply_wheel_actions(
self.jetbot_controller.forward(command=[forward_velocity, angular_velocity])
)
self._my_world.step(render=False)
observations = self.get_observations()
info = {}
done = False
truncated = False
if self._my_world.current_time_step_index - self._steps_after_reset >= self._max_episode_length:
done = True
truncated = True
goal_world_position, _ = self.goal.get_world_pose()
current_jetbot_position, _ = self.jetbot.get_world_pose()
previous_dist_to_goal = np.linalg.norm(goal_world_position - previous_jetbot_position)
current_dist_to_goal = np.linalg.norm(goal_world_position - current_jetbot_position)
reward = previous_dist_to_goal - current_dist_to_goal
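        # shaping reward: the per-step decrease in distance to the goal, so progress toward
        # the goal is rewarded and moving away from it is penalized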
if current_dist_to_goal < 0.1:
done = True
return observations, reward, done, truncated, info
def reset(self, seed=None):
self._my_world.reset()
self.reset_counter = 0
# randomize goal location in circle around robot
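        # sqrt of the uniform sample spreads goals uniformly over the annulus area;
        # the sampled radius falls in [0.20, 1.20] m around the robot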
alpha = 2 * math.pi * np.random.rand()
r = 1.00 * math.sqrt(np.random.rand()) + 0.20
self.goal.set_world_pose(np.array([math.sin(alpha) * r, math.cos(alpha) * r, 0.05]))
observations = self.get_observations()
return observations, {}
def get_observations(self):
self._my_world.render()
jetbot_world_position, jetbot_world_orientation = self.jetbot.get_world_pose()
jetbot_linear_velocity = self.jetbot.get_linear_velocity()
jetbot_angular_velocity = self.jetbot.get_angular_velocity()
goal_world_position, _ = self.goal.get_world_pose()
obs = np.concatenate(
[
jetbot_world_position,
jetbot_world_orientation,
jetbot_linear_velocity,
jetbot_angular_velocity,
goal_world_position,
]
)
return obs
def render(self, mode="human"):
return
def close(self):
self._simulation_app.close()
return
def seed(self, seed=None):
self.np_random, seed = gymnasium.utils.seeding.np_random(seed)
np.random.seed(seed)
return [seed]
| 6,310 | Python | 38.198758 | 118 | 0.606339 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.jetbot/stable_baselines_example/train.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
import torch as th
from env import JetBotEnv
parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()
log_dir = "./cnn_policy"
# set headless to False to visualize training
my_env = JetBotEnv(headless=True)
# in test mode we manually install sb3
if args.test is True:
import omni.kit.pipapi
omni.kit.pipapi.install("stable-baselines3==2.0.0", module="stable_baselines3")
omni.kit.pipapi.install("tensorboard")
# import stable baselines
try:
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3.ppo import MlpPolicy
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install stable-baselines3 in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install stable-baselines3"
)
exit()
try:
import tensorboard
except Exception as e:
carb.log_error(e)
carb.log_error(
"please install tensorboard in the current python environment or run the following to install into the builtin python environment ./python.sh -m pip install tensorboard"
)
exit()
policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=dict(vf=[128, 128, 128], pi=[128, 128, 128]))
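# separate vf/pi MLP heads, each three layers of 128 units; the plain-dict net_arch is the
# format expected by stable-baselines3 >= 1.8 (the older list-of-dict form is deprecated)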
policy = MlpPolicy
total_timesteps = 500000
if args.test is True:
total_timesteps = 10000
checkpoint_callback = CheckpointCallback(save_freq=10000, save_path=log_dir, name_prefix="jetbot_policy_checkpoint")
model = PPO(
policy,
my_env,
policy_kwargs=policy_kwargs,
verbose=1,
n_steps=2560,
batch_size=64,
learning_rate=0.000125,
gamma=0.9,
ent_coef=7.5e-08,
clip_range=0.3,
n_epochs=5,
gae_lambda=1.0,
max_grad_norm=0.9,
vf_coef=0.95,
device="cuda:0",
tensorboard_log=log_dir,
)
model.learn(total_timesteps=total_timesteps, callback=[checkpoint_callback])
model.save(log_dir + "/jetbot_policy")
my_env.close()
| 2,533 | Python | 29.53012 | 189 | 0.730754 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ocs2/franka_arm_ocs2.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Launch Omniverse Toolkit first."""
# kit
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
"""Rest everything follows."""
# python
import os
import numpy as np
from omni.isaac.core.objects.sphere import VisualSphere
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.viewports import set_camera_view
# isaac-core
from omni.isaac.core.world import World
# isaac-franka
from omni.isaac.franka import Franka
# isaac-ocs2
enable_extension("omni.isaac.ocs2")
from omni.isaac.ocs2.end_effector_pose_tracking_mpc import EndEffectorPoseTrackingMpc
# print settings
np.set_printoptions(formatter={"float_kind": "{:.2f}".format})
def main():
"""Sets the Franka control mode to "velocity" and tests the MPC."""
# Add MPC
config = {
"urdf_path": "data/franka/urdf/panda.urdf",
"lib_folder": "/tmp/ocs2/auto_generated/franka",
"mpc_config_path": "data/franka/mpc/task.info",
}
mpc_interface = EndEffectorPoseTrackingMpc(config["mpc_config_path"], config["lib_folder"], config["urdf_path"])
    # Get the number of arm degrees of freedom
arm_num_dof = mpc_interface.state_dim
# print info about MPC
print(mpc_interface)
# Load kit helper
my_world = World(stage_units_in_meters=1.0, physics_dt=0.01)
# Set main camera
set_camera_view([1.5, 1.5, 1.5], [0.0, 0.0, 0.0])
# Spawn things into stage
# -- ground
my_world.scene.add_default_ground_plane()
# -- robot
robot = my_world.scene.add(Franka("/World/Robot"))
# -- markers
goal_vis_prim = my_world.scene.add(
VisualSphere("/World/Vis/ee_goal", name="ee_goal", radius=0.01, color=np.asarray([1.0, 0.0, 0.0]))
)
ee_vis_prim = my_world.scene.add(
VisualSphere("/World/Vis/ee_curr", name="ee_curr", radius=0.01, color=np.asarray([0.0, 0.0, 1.0]))
)
# Play the simulator
my_world.reset()
# Set control mode
robot._articulation_view.switch_control_mode("velocity")
robot.disable_gravity()
# Now we are ready!
print("[INFO]: Setup complete...")
# Define simulation stepping
dt = 0.01
sim_time = 0.0
# Define goals for the arm
ee_goal_index = 0
ee_goals = [
[0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
[0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
[0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
]
# Define a goal for the arm
ee_goal_pose = np.array(ee_goals[ee_goal_index])
# Obtain measurements
arm_joint_pos = robot.get_joint_positions()[:arm_num_dof]
ee_curr_pose = robot.end_effector.get_world_pose()
ee_curr_pose = np.concatenate((ee_curr_pose[0], ee_curr_pose[1]), axis=0)
# Update visualization
goal_vis_prim.set_world_pose(ee_goal_pose[:3], ee_goal_pose[3:])
ee_vis_prim.set_world_pose(ee_curr_pose[:3], ee_curr_pose[3:])
# Define target trajectory
mpc_interface.set_target_trajectory(
time_traj=[sim_time, sim_time + 2], state_traj=[ee_curr_pose, ee_goal_pose], input_traj=[None, None]
)
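    # the MPC tracks a pose trajectory from the current end-effector pose at sim_time to the
    # goal pose two seconds later; input_traj entries are None, so no feedforward inputs are given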
# Reset the MPC
mpc_interface.reset(sim_time, arm_joint_pos)
# Simulate physics
for count in range(100000):
# obtain current measurements
arm_joint_pos = robot.get_joint_positions()[:arm_num_dof]
# compute arm's optimal control command
arm_cmd = mpc_interface.advance(sim_time, arm_joint_pos)
# print mpc cost
# perform actions
        action = ArticulationAction(joint_velocities=arm_cmd, joint_indices=list(range(arm_num_dof)))
robot.apply_action(action)
# perform step
my_world.step()
# update sim-time
sim_time += dt
# obtain new measurements
ee_curr_pose = robot.end_effector.get_world_pose()
ee_curr_pose = np.concatenate((ee_curr_pose[0], ee_curr_pose[1]), axis=0)
# compute the waypoint error
error = np.linalg.norm(ee_curr_pose[:3] - ee_goal_pose[:3])
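        # note: the waypoint error is position-only; orientation error is not checked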
# update visualization
ee_vis_prim.set_world_pose(ee_curr_pose[:3], ee_curr_pose[3:])
# get next waypoint
if error < 0.014:
# print goal state
print(
f"\tMPC cost: { mpc_interface.get_current_cost()}\n",
f"\tCurrent EE state:\n"
f"\t\tI_r_IE : {ee_curr_pose[:3]} \n"
f"\t\tq_IE : {ee_curr_pose[3:]} \n"
f"\tGoal EE state:\n"
f"\t\tI_r_IE_des: {ee_goals[ee_goal_index][:3]} \n"
f"\t\tq_IE_des : {ee_goals[ee_goal_index][3:]} \n"
"----------------------------------------------",
)
# next goal
ee_goal_index += 1
if ee_goal_index >= len(ee_goals):
ee_goal_index = 0
# Define a goal for the arm
ee_goal_pose = np.array(ee_goals[ee_goal_index])
# Update prims
goal_vis_prim.set_world_pose(ee_goal_pose[:3], ee_goal_pose[3:])
# Define target trajectory
mpc_interface.set_target_trajectory(
time_traj=[sim_time, sim_time + 2], state_traj=[ee_curr_pose, ee_goal_pose], input_traj=[None, None]
)
if __name__ == "__main__":
# Run OCS2 example with Franka
main()
# Close the simulator
simulation_app.close()
# EOF
| 5,836 | Python | 34.16265 | 116 | 0.61292 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.quadruped/go1_ros1_standalone.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""
Introduction
This is a demo of the Go1 robot's ROS integration. In this example, the robot's foot positions and contact forces are
published to the "/isaac_a1/output" topic, and these values can be plotted using PlotJuggler.
"""
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
import omni.appwindow # Contains handle to keyboard
import omni.graph.core as og
from omni.isaac.core import World
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.quadruped.robots import Unitree
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
import rospy
from std_msgs.msg import Float32MultiArray
class Go1_runner(object):
def __init__(self, physics_dt, render_dt) -> None:
"""
[Summary]
creates the simulation world with preset physics_dt and render_dt and creates a unitree go1 robot
Argument:
physics_dt {float} -- Physics downtime of the scene.
render_dt {float} -- Render downtime of the scene.
"""
self._world = World(stage_units_in_meters=1.0, physics_dt=physics_dt, rendering_dt=render_dt)
self._go1 = self._world.scene.add(
Unitree(
prim_path="/World/Go1", name="Go1", position=np.array([0, 0, 0.40]), physics_dt=physics_dt, model="Go1"
)
)
self._world.scene.add_default_ground_plane(
z_position=0,
name="default_ground_plane",
prim_path="/World/defaultGroundPlane",
static_friction=0.2,
dynamic_friction=0.2,
restitution=0.01,
)
self._world.reset()
self._enter_toggled = 0
self._base_command = [0.0, 0.0, 0.0, 0]
self._event_flag = False
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
"NUMPAD_8": [1.8, 0.0, 0.0],
"UP": [1.8, 0.0, 0.0],
# back command
"NUMPAD_2": [-1.8, 0.0, 0.0],
"DOWN": [-1.8, 0.0, 0.0],
# left command
"NUMPAD_6": [0.0, -1.8, 0.0],
"RIGHT": [0.0, -1.8, 0.0],
# right command
"NUMPAD_4": [0.0, 1.8, 0.0],
"LEFT": [0.0, 1.8, 0.0],
# yaw command (positive)
"NUMPAD_7": [0.0, 0.0, 1.0],
"N": [0.0, 0.0, 1.0],
# yaw command (negative)
"NUMPAD_9": [0.0, 0.0, -1.0],
"M": [0.0, 0.0, -1.0],
}
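        # the mapping is additive: a key press adds its command vector and the release
        # subtracts it, so holding multiple keys combines their commands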
        # Creating an on-demand push graph with ROS Clock; everything in the ROS environment must synchronize with this clock
try:
keys = og.Controller.Keys
(self._clock_graph, _, _, _) = og.Controller.edit(
{
"graph_path": "/ROS_Clock",
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("publishClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "publishClock.inputs:execIn"),
("readSimTime.outputs:simulationTime", "publishClock.inputs:timeStamp"),
],
},
)
except Exception as e:
print(e)
simulation_app.close()
exit()
self._pub = rospy.Publisher("/isaac_a1/output", Float32MultiArray, queue_size=10)
return
def setup(self) -> None:
"""
[Summary]
Set unitree robot's default stance, set up keyboard listener and add physics callback
"""
self._go1.set_state(self._go1._default_a1_state)
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._sub_keyboard_event)
self._world.add_physics_callback("a1_advance", callback_fn=self.on_physics_step)
def on_physics_step(self, step_size) -> None:
"""
[Summary]
Physics call back, switch robot mode and call robot advance function to compute and apply joint torque
"""
if self._event_flag:
self._go1._qp_controller.switch_mode()
self._event_flag = False
self._go1.advance(step_size, self._base_command)
# Tick the ROS Clock
og.Controller.evaluate_sync(self._clock_graph)
self._pub.publish(Float32MultiArray(data=self.get_footforce_data()))
def get_footforce_data(self) -> np.array:
"""
[Summary]
get foot force and position data
"""
data = np.concatenate((self._go1.foot_force, self._go1._qp_controller._ctrl_states._foot_pos_abs[:, 2]))
return data
def run(self) -> None:
"""
[Summary]
Step simulation based on rendering downtime
"""
# change to sim running
while simulation_app.is_running():
self._world.step(render=True)
return
def _sub_keyboard_event(self, event, *args, **kwargs) -> None:
"""
[Summary]
        Keyboard subscriber callback, invoked when kit is updated.
"""
# reset event
self._event_flag = False
        # when a key is pressed or released the command is adjusted w.r.t. the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
# on pressing, the command is incremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] += np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER" and self._enter_toggled is False:
self._enter_toggled = True
if self._base_command[3] == 0:
self._base_command[3] = 1
else:
self._base_command[3] = 0
self._event_flag = True
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
# on release, the command is decremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] -= np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER":
self._enter_toggled = False
# since no error, we are fine :)
return True
def main() -> None:
"""
[Summary]
Instantiate ros node and start a1 runner
"""
rospy.init_node("go1_standalone", anonymous=False, disable_signals=True, log_level=rospy.ERROR)
rospy.set_param("use_sim_time", True)
physics_downtime = 1 / 400.0
runner = Go1_runner(physics_dt=physics_downtime, render_dt=16 * physics_downtime)
simulation_app.update()
runner.setup()
# an extra reset is needed to register
runner._world.reset()
runner._world.reset()
runner.run()
rospy.signal_shutdown("go1 complete")
simulation_app.close()
if __name__ == "__main__":
main()
| 8,427 | Python | 33.260162 | 124 | 0.583007 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.quadruped/a1_vision_ros2_standalone.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import json
import carb
import numpy as np
import omni.appwindow # Contains handle to keyboard
import omni.graph.core as og
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.quadruped.robots import UnitreeVision
# enable ROS2 bridge extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros2_bridge", True)
class A1_runner(object):
def __init__(self, physics_dt, render_dt, way_points=None) -> None:
"""
        [Summary]
Creates the simulation world with preset physics_dt and render_dt and creates a unitree a1 robot (with ROS2 cameras) inside the warehouse
        Also instantiates a ROS2 clock.
Argument:
physics_dt {float} -- Physics downtime of the scene.
render_dt {float} -- Render downtime of the scene.
way_points {List[List[float]]} -- x coordinate, y coordinate, heading (in rad)
"""
self._world = World(stage_units_in_meters=1.0, physics_dt=physics_dt, rendering_dt=render_dt)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
prim = get_prim_at_path("/World/Warehouse")
if not prim.IsValid():
prim = define_prim("/World/Warehouse", "Xform")
asset_path = assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
prim.GetReferences().AddReference(asset_path)
self._a1 = self._world.scene.add(
UnitreeVision(
prim_path="/World/A1",
name="A1",
position=np.array([0, 0, 0.40]),
physics_dt=physics_dt,
model="A1",
way_points=way_points,
is_ros2=True,
)
)
# Publish camera images every 3 frames
simulation_app.update()
self._a1.setCameraExeutionStep(3)
        # Creating an on-demand push graph with ROS Clock; everything in the ROS environment must synchronize with this clock
try:
keys = og.Controller.Keys
(self._clock_graph, _, _, _) = og.Controller.edit(
{
"graph_path": "/ROS_Clock",
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("publishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "publishClock.inputs:execIn"),
("readSimTime.outputs:simulationTime", "publishClock.inputs:timeStamp"),
],
},
)
except Exception as e:
print(e)
simulation_app.close()
exit()
self._world.reset()
self._enter_toggled = 0
self._base_command = [0.0, 0.0, 0.0, 0]
self._event_flag = False
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
"NUMPAD_8": [1.8, 0.0, 0.0],
"UP": [1.8, 0.0, 0.0],
# back command
"NUMPAD_2": [-1.8, 0.0, 0.0],
"DOWN": [-1.8, 0.0, 0.0],
# left command
"NUMPAD_6": [0.0, -1.8, 0.0],
"RIGHT": [0.0, -1.8, 0.0],
# right command
"NUMPAD_4": [0.0, 1.8, 0.0],
"LEFT": [0.0, 1.8, 0.0],
# yaw command (positive)
"NUMPAD_7": [0.0, 0.0, 1.0],
"N": [0.0, 0.0, 1.0],
# yaw command (negative)
"NUMPAD_9": [0.0, 0.0, -1.0],
"M": [0.0, 0.0, -1.0],
}
def setup(self, way_points=None):
"""
[Summary]
Set unitree robot's default stance, set up keyboard listener and add physics callback
"""
self._a1.set_state(self._a1._default_a1_state)
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._sub_keyboard_event)
self._world.add_physics_callback("a1_advance", callback_fn=self.on_physics_step)
if way_points is None:
self._path_follow = False
else:
self._path_follow = True
def on_physics_step(self, step_size):
"""
[Summary]
Physics call back, switch robot mode and call robot advance function to compute and apply joint torque
"""
if self._event_flag:
self._a1._qp_controller.switch_mode()
self._event_flag = False
self._a1.advance(step_size, self._base_command, self._path_follow)
og.Controller.evaluate_sync(self._clock_graph)
def run(self):
"""
[Summary]
Step simulation based on rendering downtime
"""
# change to sim running
while simulation_app.is_running():
self._world.step(render=True)
return
def _sub_keyboard_event(self, event, *args, **kwargs):
"""
[Summary]
        Keyboard subscriber callback, invoked when kit is updated.
"""
# reset event
self._event_flag = False
        # when a key is pressed or released the command is adjusted w.r.t. the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
# on pressing, the command is incremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] += np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER" and self._enter_toggled is False:
self._enter_toggled = True
if self._base_command[3] == 0:
self._base_command[3] = 1
else:
self._base_command[3] = 0
self._event_flag = True
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
# on release, the command is decremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] -= np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER":
self._enter_toggled = False
# since no error, we are fine :)
return True
parser = argparse.ArgumentParser(description="a1 quadruped demo")
parser.add_argument("-w", "--waypoint", type=str, metavar="", required=False, help="file path to the waypoints")
args, unknown = parser.parse_known_args()
def main():
"""
[Summary]
Instantiate ros node and start a1 runner
"""
physics_downtime = 1 / 400.0
if args.waypoint:
waypoint_pose = []
        try:
            print(str(args.waypoint))
            with open(str(args.waypoint)) as file:
                waypoint_data = json.load(file)
            for waypoint in waypoint_data:
                waypoint_pose.append(np.array([waypoint["x"], waypoint["y"], waypoint["rad"]]))
except FileNotFoundError:
print("error file not found, ending")
simulation_app.close()
return
runner = A1_runner(physics_dt=physics_downtime, render_dt=8 * physics_downtime, way_points=waypoint_pose)
simulation_app.update()
        runner.setup(way_points=waypoint_pose)
else:
runner = A1_runner(physics_dt=physics_downtime, render_dt=8 * physics_downtime, way_points=None)
simulation_app.update()
runner.setup(way_points=None)
# an extra reset is needed to register
runner._world.reset()
runner._world.reset()
runner.run()
simulation_app.close()
if __name__ == "__main__":
main()
| 9,002 | Python | 34.444882 | 145 | 0.575428 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.quadruped/a1_direct_ros1_standalone.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""
Introduction
We start a runner that publishes robot sensor data as ROS1 topics and listens to the external ROS1 topic "isaac_a1/joint_torque_cmd".
The runner sets robot joint torques directly from that topic.
The runner instantiates the robot UnitreeDirect, which takes in joint torques directly and sends them to the low-level joint controllers.
This is a very simple example demonstrating how to treat Isaac Sim as a simulation component within the ROS1 ecosystem.
"""
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
import omni.appwindow # Contains handle to keyboard
import omni.graph.core as og
from omni.isaac.core import World
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.quadruped.robots import UnitreeDirect
from omni.isaac.quadruped.utils.a1_classes import A1Measurement
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# ros-python and ROS1 messages
import geometry_msgs.msg as geometry_msgs
import rospy
import sensor_msgs.msg as sensor_msgs
class A1_direct_runner(object):
def __init__(self, physics_dt, render_dt) -> None:
"""
[Summary]
creates the simulation world with preset physics_dt and render_dt and creates a unitree a1 robot inside the warehouse
Argument:
physics_dt {float} -- Physics downtime of the scene.
render_dt {float} -- Render downtime of the scene.
"""
self._world = World(stage_units_in_meters=1.0, physics_dt=physics_dt, rendering_dt=render_dt)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
prim = get_prim_at_path("/World/Warehouse")
if not prim.IsValid():
prim = define_prim("/World/Warehouse", "Xform")
asset_path = assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
prim.GetReferences().AddReference(asset_path)
self._a1 = self._world.scene.add(
UnitreeDirect(
prim_path="/World/A1", name="A1", position=np.array([0, 0, 0.40]), physics_dt=physics_dt, model="A1"
)
)
self._world.reset()
        # Creating an on-demand push graph with ROS Clock; everything in the ROS environment must synchronize with this clock
try:
keys = og.Controller.Keys
(self._clock_graph, _, _, _) = og.Controller.edit(
{
"graph_path": "/ROS_Clock",
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("publishClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "publishClock.inputs:execIn"),
("readSimTime.outputs:simulationTime", "publishClock.inputs:timeStamp"),
],
},
)
except Exception as e:
print(e)
simulation_app.close()
exit()
##
# ROS publishers
##
# a) ground truth body pose
self._pub_body_pose = rospy.Publisher("isaac_a1/gt_body_pose", geometry_msgs.PoseStamped, queue_size=21)
self._msg_body_pose = geometry_msgs.PoseStamped()
self._msg_body_pose.header.frame_id = "base_link"
# b) joint angle and foot force
self._pub_joint_state = rospy.Publisher("isaac_a1/joint_foot", sensor_msgs.JointState, queue_size=21)
self._msg_joint_state = sensor_msgs.JointState()
self._msg_joint_state.name = [
"FL0",
"FL1",
"FL2",
"FR0",
"FR1",
"FR2",
"RL0",
"RL1",
"RL2",
"RR0",
"RR1",
"RR2",
"FL_foot",
"FR_foot",
"RL_foot",
"RR_foot",
]
self._msg_joint_state.position = [0.0] * 16
self._msg_joint_state.velocity = [0.0] * 16
self._msg_joint_state.effort = [0.0] * 16
# c) IMU measurements
self._pub_imu_debug = rospy.Publisher("isaac_a1/imu_data", sensor_msgs.Imu, queue_size=21)
self._msg_imu_debug = sensor_msgs.Imu()
self._msg_imu_debug.header.frame_id = "base_link"
# d) ground truth body pose with a fake covariance
self._pub_body_pose_with_cov = rospy.Publisher(
"isaac_a1/gt_body_pose_with_cov", geometry_msgs.PoseWithCovarianceStamped, queue_size=21
)
self._msg_body_pose_with_cov = geometry_msgs.PoseWithCovarianceStamped()
self._msg_body_pose_with_cov.header.frame_id = "base_link"
##
# ROS subscribers
##
self._sub_joint_cmd = rospy.Subscriber(
"isaac_a1/joint_torque_cmd", sensor_msgs.JointState, self.joint_command_callback
)
# buffer to store the robot command
self._ros_command = np.zeros(12)
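        # the expected torque ordering is assumed to match the first 12 names published in
        # _msg_joint_state (FL0..FL2, FR0..FR2, RL0..RL2, RR0..RR2)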
def setup(self):
"""
[Summary]
add physics callback
"""
self._app_window = omni.appwindow.get_default_app_window()
self._world.add_physics_callback("robot_sim_step", callback_fn=self.robot_simulation_step)
# start ROS publisher and subscribers
def run(self):
"""
[Summary]
Step simulation based on rendering downtime
"""
# change to sim running
while simulation_app.is_running():
self._world.step(render=True)
return
def publish_ros_data(self, measurement: A1Measurement):
"""
[Summary]
Publish body pose, joint state, imu data
"""
# update all header timestamps
ros_timestamp = rospy.get_rostime()
self._msg_body_pose.header.stamp = ros_timestamp
self._msg_joint_state.header.stamp = ros_timestamp
self._msg_imu_debug.header.stamp = ros_timestamp
self._msg_body_pose_with_cov.header.stamp = ros_timestamp
# a) ground truth pose
self._update_body_pose_msg(measurement)
self._pub_body_pose.publish(self._msg_body_pose)
# b) joint state and contact force
self._update_msg_joint_state(measurement)
self._pub_joint_state.publish(self._msg_joint_state)
# c) IMU
self._update_imu_msg(measurement)
self._pub_imu_debug.publish(self._msg_imu_debug)
# d) ground truth pose with covariance
self._update_body_pose_with_cov_msg(measurement)
self._pub_body_pose_with_cov.publish(self._msg_body_pose_with_cov)
return
"""call backs"""
def robot_simulation_step(self, step_size):
"""
[Summary]
Call robot update and advance, and tick ros bridge
"""
self._a1.update()
self._a1.advance()
# Tick the ROS Clock
og.Controller.evaluate_sync(self._clock_graph)
# Publish ROS data
self.publish_ros_data(self._a1._measurement)
def joint_command_callback(self, data):
"""
[Summary]
Joint command call back, set command torque for the joints
"""
for i in range(12):
self._ros_command[i] = data.effort[i]
self._a1.set_command_torque(self._ros_command)
"""
    Utility functions.
"""
def _update_body_pose_msg(self, measurement: A1Measurement):
"""
[Summary]
Updates the body pose message.
"""
# base position
self._msg_body_pose.pose.position.x = measurement.state.base_frame.pos[0]
self._msg_body_pose.pose.position.y = measurement.state.base_frame.pos[1]
self._msg_body_pose.pose.position.z = measurement.state.base_frame.pos[2]
# base orientation
self._msg_body_pose.pose.orientation.w = measurement.state.base_frame.quat[3]
self._msg_body_pose.pose.orientation.x = measurement.state.base_frame.quat[0]
self._msg_body_pose.pose.orientation.y = measurement.state.base_frame.quat[1]
self._msg_body_pose.pose.orientation.z = measurement.state.base_frame.quat[2]
def _update_msg_joint_state(self, measurement: A1Measurement):
"""
[Summary]
Updates the joint state message.
"""
# joint position and velocity
for i in range(12):
self._msg_joint_state.position[i] = measurement.state.joint_pos[i]
self._msg_joint_state.velocity[i] = measurement.state.joint_vel[i]
# foot force
for i in range(4):
# notice this order is: FL, FR, RL, RR
self._msg_joint_state.effort[12 + i] = measurement.foot_forces[i]
def _update_imu_msg(self, measurement: A1Measurement):
"""
[Summary]
Updates the IMU message.
"""
# accelerometer data
self._msg_imu_debug.linear_acceleration.x = measurement.base_lin_acc[0]
self._msg_imu_debug.linear_acceleration.y = measurement.base_lin_acc[1]
self._msg_imu_debug.linear_acceleration.z = measurement.base_lin_acc[2]
# gyroscope data
self._msg_imu_debug.angular_velocity.x = measurement.base_ang_vel[0]
self._msg_imu_debug.angular_velocity.y = measurement.base_ang_vel[1]
self._msg_imu_debug.angular_velocity.z = measurement.base_ang_vel[2]
def _update_body_pose_with_cov_msg(self, measurement: A1Measurement):
"""
[Summary]
Updates the body pose with fake covariance message.
"""
# base position
self._msg_body_pose_with_cov.pose.pose.position.x = measurement.state.base_frame.pos[0]
self._msg_body_pose_with_cov.pose.pose.position.y = measurement.state.base_frame.pos[1]
self._msg_body_pose_with_cov.pose.pose.position.z = measurement.state.base_frame.pos[2]
# base orientation
self._msg_body_pose_with_cov.pose.pose.orientation.w = measurement.state.base_frame.quat[3]
self._msg_body_pose_with_cov.pose.pose.orientation.x = measurement.state.base_frame.quat[0]
self._msg_body_pose_with_cov.pose.pose.orientation.y = measurement.state.base_frame.quat[1]
self._msg_body_pose_with_cov.pose.pose.orientation.z = measurement.state.base_frame.quat[2]
# Setting fake covariance
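        # (a small constant 0.001 on the diagonal of the row-major 6x6 pose covariance,
        # since the ground-truth pose has no real uncertainty estimate)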
for i in range(6):
self._msg_body_pose_with_cov.pose.covariance[i * 6 + i] = 0.001
def main():
"""
[Summary]
    The function launches the simulator, creates the robot, and runs the simulation steps
"""
# first enable ros node, make sure using simulation time
rospy.init_node("isaac_a1", anonymous=False, disable_signals=True, log_level=rospy.ERROR)
rospy.set_param("use_sim_time", True)
physics_downtime = 1 / 400.0
runner = A1_direct_runner(physics_dt=physics_downtime, render_dt=physics_downtime)
simulation_app.update()
runner.setup()
# an extra reset is needed to register
runner._world.reset()
runner._world.reset()
runner.run()
rospy.signal_shutdown("a1 direct complete")
simulation_app.close()
if __name__ == "__main__":
main()
| 12,471 | Python | 34.942363 | 129 | 0.622805 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.quadruped/anymal_standalone.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
import omni.appwindow # Contains handle to keyboard
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.quadruped.robots import Anymal
from pxr import Gf, UsdGeom
class Anymal_runner(object):
def __init__(self, physics_dt, render_dt) -> None:
"""
        [Summary]
creates the simulation world with preset physics_dt and render_dt and creates an anymal robot inside the warehouse
Argument:
physics_dt {float} -- Physics downtime of the scene.
render_dt {float} -- Render downtime of the scene.
"""
self._world = World(stage_units_in_meters=1.0, physics_dt=physics_dt, rendering_dt=render_dt)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
# spawn warehouse scene
prim = get_prim_at_path("/World/GroundPlane")
if not prim.IsValid():
prim = define_prim("/World/GroundPlane", "Xform")
asset_path = assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
prim.GetReferences().AddReference(asset_path)
self._anymal = self._world.scene.add(
Anymal(
prim_path="/World/Anymal",
name="Anymal",
usd_path=assets_root_path + "/Isaac/Robots/ANYbotics/anymal_c.usd",
position=np.array([0, 0, 0.70]),
)
)
self._world.reset()
self._enter_toggled = 0
self._base_command = np.zeros(3)
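        # base command is [x velocity, y velocity, yaw rate], accumulated from the key
        # presses/releases handled below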
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
"NUMPAD_8": [1.0, 0.0, 0.0],
"UP": [1.0, 0.0, 0.0],
# back command
"NUMPAD_2": [-1.0, 0.0, 0.0],
"DOWN": [-1.0, 0.0, 0.0],
# left command
"NUMPAD_6": [0.0, -1.0, 0.0],
"RIGHT": [0.0, -1.0, 0.0],
# right command
"NUMPAD_4": [0.0, 1.0, 0.0],
"LEFT": [0.0, 1.0, 0.0],
# yaw command (positive)
"NUMPAD_7": [0.0, 0.0, 1.0],
"N": [0.0, 0.0, 1.0],
# yaw command (negative)
"NUMPAD_9": [0.0, 0.0, -1.0],
"M": [0.0, 0.0, -1.0],
}
self.needs_reset = False
def setup(self) -> None:
"""
[Summary]
Set up keyboard listener and add physics callback
"""
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._sub_keyboard_event)
self._world.add_physics_callback("anymal_advance", callback_fn=self.on_physics_step)
def on_physics_step(self, step_size) -> None:
"""
[Summary]
Physics call back, switch robot mode and call robot advance function to compute and apply joint torque
"""
if self.needs_reset:
self._world.reset(True)
self.needs_reset = False
self._anymal.advance(step_size, self._base_command)
def run(self) -> None:
"""
[Summary]
Step simulation based on rendering downtime
"""
# change to sim running
while simulation_app.is_running():
self._world.step(render=True)
if not self._world.is_simulating():
self.needs_reset = True
return
def _sub_keyboard_event(self, event, *args, **kwargs) -> bool:
"""
[Summary]
        Keyboard subscriber callback, invoked when kit is updated.
"""
# reset event
self._event_flag = False
        # when a key is pressed or released the command is adjusted w.r.t. the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
# on pressing, the command is incremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] += np.array(self._input_keyboard_mapping[event.input.name])
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
# on release, the command is decremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] -= np.array(self._input_keyboard_mapping[event.input.name])
return True
def main():
"""
[Summary]
Parse arguments and instantiate the ANYmal runner
"""
physics_dt = 1 / 200.0
render_dt = 1 / 60.0
runner = Anymal_runner(physics_dt=physics_dt, render_dt=render_dt)
simulation_app.update()
runner.setup()
# an extra reset is needed to register
runner._world.reset()
runner._world.reset()
runner.run()
simulation_app.close()
if __name__ == "__main__":
main()
| 5,611 | Python | 32.2071 | 122 | 0.594903 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.quadruped/a1_vision_ros1_standalone.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""
Introduction:
In this demo, the quadruped publishes data from a pair of stereo vision cameras along with IMU data for the VINS-Fusion
visual-inertial odometry algorithm. Users can use the keyboard mapping to control the motion of the quadruped while the
quadruped localizes itself.
"""
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import carb
import numpy as np
import omni.appwindow # Contains handle to keyboard
import omni.graph.core as og
from omni.isaac.core import World
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.quadruped.robots import UnitreeVision
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
import rospy
import sensor_msgs.msg as sensor_msgs
from std_msgs.msg import Float32MultiArray
class A1_stereo_vision(object):
def __init__(self, physics_dt, render_dt) -> None:
"""
[Summary]
        creates the simulation world with preset physics_dt and render_dt and creates a unitree a1 robot (with ros cameras) inside a custom
        environment, and sets up ros publishers for the isaac_a1/imu_data and isaac_a1/foot_force topics
Argument:
physics_dt {float} -- Physics downtime of the scene.
render_dt {float} -- Render downtime of the scene.
"""
self._world = World(stage_units_in_meters=1.0, physics_dt=physics_dt, rendering_dt=render_dt)
prim = get_prim_at_path("/World/Warehouse")
if not prim.IsValid():
prim = define_prim("/World/Warehouse", "Xform")
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets server")
asset_path = assets_root_path + "/Isaac/Samples/ROS/Scenario/visual_odometry_testing.usd"
prim.GetReferences().AddReference(asset_path)
self._a1 = self._world.scene.add(
UnitreeVision(
prim_path="/World/A1", name="A1", position=np.array([0, 0, 0.27]), physics_dt=physics_dt, model="A1"
)
)
# Publish camera images every 3 frames
simulation_app.update()
self._a1.setCameraExeutionStep(3)
self._world.reset()
self._enter_toggled = 0
self._base_command = [0.0, 0.0, 0.0, 0]
self._event_flag = False
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
"NUMPAD_8": [1.8, 0.0, 0.0],
"UP": [1.8, 0.0, 0.0],
# back command
"NUMPAD_2": [-1.8, 0.0, 0.0],
"DOWN": [-1.8, 0.0, 0.0],
# left command
"NUMPAD_6": [0.0, -1.8, 0.0],
"RIGHT": [0.0, -1.8, 0.0],
# right command
"NUMPAD_4": [0.0, 1.8, 0.0],
"LEFT": [0.0, 1.8, 0.0],
# yaw command (positive)
"NUMPAD_7": [0.0, 0.0, 1.0],
"N": [0.0, 0.0, 1.0],
# yaw command (negative)
"NUMPAD_9": [0.0, 0.0, -1.0],
"M": [0.0, 0.0, -1.0],
}
        # Creating an on-demand push graph with ROS Clock; everything in the ROS environment must synchronize with this clock
try:
keys = og.Controller.Keys
(self._clock_graph, _, _, _) = og.Controller.edit(
{
"graph_path": "/ROS_Clock",
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("readSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("publishClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "publishClock.inputs:execIn"),
("readSimTime.outputs:simulationTime", "publishClock.inputs:timeStamp"),
],
},
)
except Exception as e:
print(e)
simulation_app.close()
exit()
self._footforce_pub = rospy.Publisher("isaac_a1/foot_force", Float32MultiArray, queue_size=10)
self._imu_pub = rospy.Publisher("isaac_a1/imu_data", sensor_msgs.Imu, queue_size=21)
self._step_count = 0
self._publish_interval = 2
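        # with the 1/400 s physics step set in main(), publishing every 2nd step gives
        # ~200 Hz IMU and foot-force streams for VINS-Fusion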
self._foot_force = Float32MultiArray()
self._imu_msg = sensor_msgs.Imu()
self._imu_msg.header.frame_id = "base_link"
def setup(self) -> None:
"""
[Summary]
Set unitree robot's default stance, set up keyboard listener and add physics callback
"""
self._a1.set_state(self._a1._default_a1_state)
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._sub_keyboard_event)
self._world.add_physics_callback("a1_advance", callback_fn=self.on_physics_step)
def on_physics_step(self, step_size) -> None:
"""
[Summary]
Physics call back, switch robot mode and call robot advance function to compute and apply joint torque
"""
if self._event_flag:
self._a1._qp_controller.switch_mode()
self._event_flag = False
self._a1.advance(step_size, self._base_command)
og.Controller.evaluate_sync(self._clock_graph)
self._step_count += 1
if self._step_count % self._publish_interval == 0:
ros_time = rospy.get_rostime()
self.update_footforce_data()
self._footforce_pub.publish(self._foot_force)
self.update_imu_data()
self._imu_msg.header.stamp = ros_time
self._imu_pub.publish(self._imu_msg)
self._step_count = 0
def update_footforce_data(self) -> None:
"""
[Summary]
Update foot position and foot force data for ros publisher
"""
self._foot_force.data = np.concatenate(
(self._a1.foot_force, self._a1._qp_controller._ctrl_states._foot_pos_abs[:, 2])
)
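        # Note: the published array holds one contact-force entry per leg followed by
        # one foot height (z) per leg (a hedged reading of the concatenation above).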
def update_imu_data(self) -> None:
"""
[Summary]
Update imu data for ros publisher
"""
self._imu_msg.orientation.x = self._a1._state.base_frame.quat[0]
self._imu_msg.orientation.y = self._a1._state.base_frame.quat[1]
self._imu_msg.orientation.z = self._a1._state.base_frame.quat[2]
self._imu_msg.orientation.w = self._a1._state.base_frame.quat[3]
self._imu_msg.linear_acceleration.x = self._a1._measurement.base_lin_acc[0]
self._imu_msg.linear_acceleration.y = self._a1._measurement.base_lin_acc[1]
self._imu_msg.linear_acceleration.z = self._a1._measurement.base_lin_acc[2]
self._imu_msg.angular_velocity.x = self._a1._measurement.base_ang_vel[0]
self._imu_msg.angular_velocity.y = self._a1._measurement.base_ang_vel[1]
self._imu_msg.angular_velocity.z = self._a1._measurement.base_ang_vel[2]
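    # A minimal sketch of an optional helper (hypothetical, not called by this
    # sample) that attaches fixed covariance matrices to the same Imu message;
    # the diagonal values are illustrative placeholders, not calibrated for the A1.
    def set_example_imu_covariance(self) -> None:
        self._imu_msg.orientation_covariance = [0.01, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01]
        self._imu_msg.angular_velocity_covariance = [0.005, 0.0, 0.0, 0.0, 0.005, 0.0, 0.0, 0.0, 0.005]
        self._imu_msg.linear_acceleration_covariance = [0.05, 0.0, 0.0, 0.0, 0.05, 0.0, 0.0, 0.0, 0.05]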
def run(self) -> None:
"""
[Summary]
        Step the simulation based on the rendering timestep
"""
# change to sim running
while simulation_app.is_running():
self._world.step(render=True)
return
def _sub_keyboard_event(self, event, *args, **kwargs) -> bool:
"""
[Summary]
        Keyboard subscriber callback, invoked when kit is updated.
        """
        # reset event
        self._event_flag = False
        # when a key is pressed or released the command is adjusted w.r.t the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
# on pressing, the command is incremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] += np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER" and self._enter_toggled is False:
self._enter_toggled = True
if self._base_command[3] == 0:
self._base_command[3] = 1
else:
self._base_command[3] = 0
self._event_flag = True
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
# on release, the command is decremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] -= np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER":
self._enter_toggled = False
# since no error, we are fine :)
return True
def main() -> None:
"""
[Summary]
Instantiate ros node and start a1 runner
"""
rospy.init_node("isaac_a1", anonymous=False, disable_signals=True, log_level=rospy.ERROR)
rospy.set_param("use_sim_time", True)
physics_downtime = 1 / 400.0
runner = A1_stereo_vision(physics_dt=physics_downtime, render_dt=8 * physics_downtime)
simulation_app.update()
runner.setup()
# an extra reset is needed to register
runner._world.reset()
runner._world.reset()
runner.run()
rospy.signal_shutdown("a1 vision complete")
simulation_app.close()
if __name__ == "__main__":
main()
| 10,664 | Python | 35.649484 | 139 | 0.597618 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.quadruped/a1_standalone.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
import argparse
import json
import carb
import numpy as np
import omni.appwindow # Contains handle to keyboard
from omni.isaac.core import World
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.quadruped.robots import Unitree
from pxr import Gf, UsdGeom
class A1_runner(object):
def __init__(self, physics_dt, render_dt, way_points=None) -> None:
"""
        [Summary]
creates the simulation world with preset physics_dt and render_dt and creates a unitree a1 robot inside the warehouse
Argument:
            physics_dt {float} -- Physics timestep of the scene.
            render_dt {float} -- Rendering timestep of the scene.
way_points {List[List[float]]} -- x coordinate, y coordinate, heading (in rad)
"""
self._world = World(stage_units_in_meters=1.0, physics_dt=physics_dt, rendering_dt=render_dt)
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
# spawn warehouse scene
prim = get_prim_at_path("/World/Warehouse")
if not prim.IsValid():
prim = define_prim("/World/Warehouse", "Xform")
asset_path = assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
prim.GetReferences().AddReference(asset_path)
self._a1 = self._world.scene.add(
Unitree(
prim_path="/World/A1",
name="A1",
position=np.array([0, 0, 0.40]),
physics_dt=physics_dt,
model="A1",
way_points=way_points,
)
)
self._world.reset()
        self._enter_toggled = False
self._base_command = [0.0, 0.0, 0.0, 0]
self._event_flag = False
# bindings for keyboard to command
self._input_keyboard_mapping = {
# forward command
"NUMPAD_8": [1.8, 0.0, 0.0],
"UP": [1.8, 0.0, 0.0],
# back command
"NUMPAD_2": [-1.8, 0.0, 0.0],
"DOWN": [-1.8, 0.0, 0.0],
# left command
"NUMPAD_6": [0.0, -1.8, 0.0],
"RIGHT": [0.0, -1.8, 0.0],
# right command
"NUMPAD_4": [0.0, 1.8, 0.0],
"LEFT": [0.0, 1.8, 0.0],
# yaw command (positive)
"NUMPAD_7": [0.0, 0.0, 1.0],
"N": [0.0, 0.0, 1.0],
# yaw command (negative)
"NUMPAD_9": [0.0, 0.0, -1.0],
"M": [0.0, 0.0, -1.0],
}
def setup(self, way_points=None) -> None:
"""
[Summary]
Set unitree robot's default stance, set up keyboard listener and add physics callback
"""
self._a1.set_state(self._a1._default_a1_state)
self._appwindow = omni.appwindow.get_default_app_window()
self._input = carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._sub_keyboard_event)
self._world.add_physics_callback("a1_advance", callback_fn=self.on_physics_step)
if way_points is None:
self._path_follow = False
else:
self._path_follow = True
def on_physics_step(self, step_size) -> None:
"""
[Summary]
Physics call back, switch robot mode and call robot advance function to compute and apply joint torque
"""
if self._event_flag:
self._a1._qp_controller.switch_mode()
self._event_flag = False
self._a1.advance(step_size, self._base_command, self._path_follow)
def run(self) -> None:
"""
[Summary]
        Step the simulation based on the rendering timestep
"""
# change to sim running
while simulation_app.is_running():
self._world.step(render=True)
return
def _sub_keyboard_event(self, event, *args, **kwargs) -> bool:
"""
[Summary]
        Keyboard subscriber callback, invoked when kit is updated.
"""
# reset event
self._event_flag = False
        # when a key is pressed or released the command is adjusted w.r.t the key-mapping
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
# on pressing, the command is incremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] += np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER" and self._enter_toggled is False:
self._enter_toggled = True
if self._base_command[3] == 0:
self._base_command[3] = 1
else:
self._base_command[3] = 0
self._event_flag = True
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
# on release, the command is decremented
if event.input.name in self._input_keyboard_mapping:
self._base_command[0:3] -= np.array(self._input_keyboard_mapping[event.input.name])
self._event_flag = True
# enter, toggle the last command
if event.input.name == "ENTER":
self._enter_toggled = False
# since no error, we are fine :)
return True
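# A small helper (hypothetical, not used by this sample) illustrating the waypoint
# JSON format consumed by the --waypoint argument below: a list of objects with
# "x", "y" and "rad" (heading in radians) keys.
def write_example_waypoints(path: str) -> None:
    example_waypoints = [
        {"x": 0.0, "y": 0.0, "rad": 0.0},
        {"x": 2.0, "y": 0.0, "rad": 1.57},
    ]
    with open(path, "w") as f:
        json.dump(example_waypoints, f)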
parser = argparse.ArgumentParser(description="a1 quadruped demo")
parser.add_argument("-w", "--waypoint", type=str, metavar="", required=False, help="file path to the waypoints")
args, unknown = parser.parse_known_args()
def main():
"""
[Summary]
Parse arguments and instantiate A1 runner
"""
physics_downtime = 1 / 400.0
if args.waypoint:
waypoint_pose = []
try:
print(str(args.waypoint))
            with open(str(args.waypoint)) as file:
                waypoint_data = json.load(file)
for waypoint in waypoint_data:
waypoint_pose.append(np.array([waypoint["x"], waypoint["y"], waypoint["rad"]]))
# print(str(waypoint_pose))
except FileNotFoundError:
print("error file not found, ending")
simulation_app.close()
return
runner = A1_runner(physics_dt=physics_downtime, render_dt=16 * physics_downtime, way_points=waypoint_pose)
simulation_app.update()
        runner.setup(way_points=waypoint_pose)
else:
runner = A1_runner(physics_dt=physics_downtime, render_dt=16 * physics_downtime, way_points=None)
simulation_app.update()
runner.setup(None)
# an extra reset is needed to register
runner._world.reset()
runner._world.reset()
runner.run()
simulation_app.close()
if __name__ == "__main__":
main()
| 7,495 | Python | 33.385321 | 125 | 0.584523 |
2820207922/isaac_ws/standalone_examples/api/omni.importer.urdf/urdf_import.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
# URDF import, configuration and simulation sample
kit = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni.kit.commands
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.extensions import get_extension_path_from_name
from pxr import Gf, PhysxSchema, Sdf, UsdLux, UsdPhysics
# Setting up import configuration:
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = False
import_config.convex_decomp = False
import_config.import_inertia_tensor = True
import_config.fix_base = False
import_config.distance_scale = 100
# Get path to extension data:
extension_path = get_extension_path_from_name("omni.importer.urdf")
# Import URDF, stage_path contains the path to the USD prim in the stage.
status, stage_path = omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=extension_path + "/data/urdf/robots/carter/urdf/carter.urdf",
import_config=import_config,
get_articulation_root=True,
)
# Get stage handle
stage = omni.usd.get_context().get_stage()
# Enable physics
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
# Set gravity
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(9.81)
# Set solver settings
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physicsScene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/physicsScene")
physxSceneAPI.CreateEnableCCDAttr(True)
physxSceneAPI.CreateEnableStabilizationAttr(True)
physxSceneAPI.CreateEnableGPUDynamicsAttr(False)
physxSceneAPI.CreateBroadphaseTypeAttr("MBP")
physxSceneAPI.CreateSolverTypeAttr("TGS")
# Add ground plane
omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, -50),
color=Gf.Vec3f(0.5),
)
# Add lighting
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
# Get handle to the Drive API for both wheels
left_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/left_wheel"), "angular")
right_wheel_drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/carter/chassis_link/right_wheel"), "angular")
# Set the velocity drive target in degrees/second
left_wheel_drive.GetTargetVelocityAttr().Set(150)
right_wheel_drive.GetTargetVelocityAttr().Set(150)
# Set the drive damping, which controls the strength of the velocity drive
left_wheel_drive.GetDampingAttr().Set(15000)
right_wheel_drive.GetDampingAttr().Set(15000)
# Set the drive stiffness, which controls the strength of the position drive
# In this case because we want to do velocity control this should be set to zero
left_wheel_drive.GetStiffnessAttr().Set(0)
right_wheel_drive.GetStiffnessAttr().Set(0)
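# For reference, a small helper (a sketch, not part of the original sample) applying
# the same velocity-drive convention to any angular joint prim on the stage:
def set_velocity_drive(joint_prim_path, target_velocity_deg_per_s, damping=15000, stiffness=0):
    drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath(joint_prim_path), "angular")
    drive.GetTargetVelocityAttr().Set(target_velocity_deg_per_s)
    drive.GetDampingAttr().Set(damping)
    drive.GetStiffnessAttr().Set(stiffness)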
# Start simulation
omni.timeline.get_timeline_interface().play()
# perform one simulation step so physics is loaded and dynamic control works.
kit.update()
art = Articulation(prim_path=stage_path)
art.initialize()
if not art.handles_initialized:
print(f"{stage_path} is not an articulation")
else:
print(f"Got articulation {stage_path} with handle {art.articulation_handle}")
# perform simulation
for frame in range(100):
kit.update()
# Shutdown and exit
omni.timeline.get_timeline_interface().stop()
kit.close()
| 3,877 | Python | 36.288461 | 111 | 0.780242 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/carter_multiple_robot_navigation.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
HOSPITAL_USD_PATH = "/Isaac/Samples/ROS/Scenario/multiple_robot_carter_hospital_navigation.usd"
OFFICE_USD_PATH = "/Isaac/Samples/ROS/Scenario/multiple_robot_carter_office_navigation.usd"
# Default environment: Hospital
ENV_USD_PATH = HOSPITAL_USD_PATH
if len(sys.argv) > 1:
if sys.argv[1] == "office":
# Choosing Office environment
ENV_USD_PATH = OFFICE_USD_PATH
elif sys.argv[1] != "hospital":
carb.log_warn("Environment name is invalid. Choosing default Hospital environment.")
else:
carb.log_warn("Environment name not specified. Choosing default Hospital environment.")
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS bridge sample demonstrating the manual loading of Multiple Robot Navigation scenario
simulation_app = SimulationApp(CONFIG)
import omni
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, prims, rotations, stage, viewports
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Sdf
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# Locate assets root folder to load sample
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
usd_path = assets_root_path + ENV_USD_PATH
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
simulation_app.update()
simulation_app.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
simulation_app.update()
print("Loading Complete")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
simulation_app.update()
simulation_context.play()
simulation_app.update()
while simulation_app.is_running():
# runs with a realtime clock
simulation_app.update()
simulation_context.stop()
simulation_app.close()
| 2,808 | Python | 29.868132 | 98 | 0.763889 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/rtx_lidar.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
# Example for creating a RTX lidar sensor and publishing PCL data
simulation_app = SimulationApp({"headless": False})
import omni
import omni.kit.viewport.utility
import omni.replicator.core as rep
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import nucleus, stage
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Gf
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
simulation_app.update()
# Loading the simple_room environment
stage.add_reference_to_stage(
assets_root_path + "/Isaac/Environments/Simple_Warehouse/full_warehouse.usd", "/background"
)
simulation_app.update()
# Create the lidar sensor that generates data into "RtxSensorCpu"
# Sensor needs to be rotated 90 degrees about X so that its Z up
# Possible options are Example_Rotary and Example_Solid_State
# drive sim applies 0.5,-0.5,-0.5,w(-0.5), we have to apply the reverse
_, sensor = omni.kit.commands.execute(
"IsaacSensorCreateRtxLidar",
path="/sensor",
parent=None,
config="Example_Rotary",
translation=(0, 0, 1.0),
orientation=Gf.Quatd(1.0, 0.0, 0.0, 0.0), # Gf.Quatd is w,i,j,k
)
hydra_texture = rep.create.render_product(sensor.GetPath(), [1, 1], name="Isaac")
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
simulation_app.update()
# Create Point cloud publisher pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "ROS1PublishPointCloud")
writer.initialize(topicName="point_cloud", frameId="sim_lidar")
writer.attach([hydra_texture])
# Create the debug draw pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "DebugDrawPointCloud")
writer.attach([hydra_texture])
# Create LaserScan publisher pipeline in the post process graph
writer = rep.writers.get("RtxLidar" + "ROS1PublishLaserScan")
writer.initialize(topicName="laser_scan", frameId="sim_lidar")
writer.attach([hydra_texture])
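# Each writer above is an independent consumer of the same lidar render product:
# one publishes PointCloud2, one draws debug points in the viewport, and one
# publishes LaserScan, all driven by the render loop below.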
simulation_app.update()
simulation_context.play()
while simulation_app.is_running():
simulation_app.update()
# cleanup and shutdown
simulation_context.stop()
simulation_app.close()
| 3,269 | Python | 32.71134 | 113 | 0.762619 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/clock.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import time
import carb
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser(description="ROS Clock Example")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
# Example ROS bridge sample showing rospy and rosclock interaction
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
if args.test:
from omni.isaac.ros_bridge.scripts.roscore import Roscore
from omni.isaac.ros_bridge.tests.common import wait_for_rosmaster
roscore = Roscore()
wait_for_rosmaster()
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
import rospy
# Note that this is not the system level rospy, but one compiled for omniverse
from rosgraph_msgs.msg import Clock
clock_topic = "sim_time"
manual_clock_topic = "manual_time"
simulation_context = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, stage_units_in_meters=1.0)
# Creating a action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
("PublishClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("PublishManualClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
],
og.Controller.Keys.CONNECT: [
# Connecting execution of OnPlaybackTick node to PublishClock to automatically publish each frame
("OnPlaybackTick.outputs:tick", "PublishClock.inputs:execIn"),
# Connecting execution of OnImpulseEvent node to PublishManualClock so it will only publish when an impulse event is triggered
("OnImpulseEvent.outputs:execOut", "PublishManualClock.inputs:execIn"),
# Connecting simulationTime data of ReadSimTime to the clock publisher nodes
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishManualClock.inputs:timeStamp"),
],
og.Controller.Keys.SET_VALUES: [
# Assigning topic names to clock publishers
("PublishClock.inputs:topicName", clock_topic),
("PublishManualClock.inputs:topicName", manual_clock_topic),
],
},
)
except Exception as e:
print(e)
simulation_app.update()
simulation_app.update()
# Define ROS callbacks
def sim_clock_callback(data):
print("sim time:", data.clock.to_sec())
def manual_clock_callback(data):
print("manual stepped sim time:", data.clock.to_sec())
# Create rospy node
rospy.init_node("isaac_sim_clock", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
# create subscribers
sim_clock_sub = rospy.Subscriber(clock_topic, Clock, sim_clock_callback)
manual_clock_sub = rospy.Subscriber(manual_clock_topic, Clock, manual_clock_callback)
time.sleep(1.0)
# need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
# perform a fixed number of steps with fixed step size
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_context.render() # This updates rendering/app loop which calls the sim clock
simulation_context.step(render=False) # runs with a non-realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
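# In the loop above, sim_time advances on every render()/step() tick, while
# manual_time only advances on the frames where the impulse is triggered (every
# 10th frame here), as printed by the two subscriber callbacks.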
# perform a fixed number of steps with realtime clock
for frame in range(20):
# publish manual clock every 10 frames
if frame % 10 == 0:
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_app.update() # runs with a realtime clock
# This sleep is to make this sample run a bit more deterministically for the subscriber callback
# In general this sleep is not needed
time.sleep(0.1)
# cleanup and shutdown
sim_clock_sub.unregister()
manual_clock_sub.unregister()
simulation_context.stop()
if args.test:
roscore = None
simulation_app.close()
| 5,492 | Python | 35.865772 | 142 | 0.716497 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/carter_stereo.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import argparse
import carb
from omni.isaac.kit import SimulationApp
parser = argparse.ArgumentParser(description="Carter Stereo Example")
parser.add_argument("--test", action="store_true")
args, unknown = parser.parse_known_args()
# Example ROS bridge sample showing manual control over messages
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.core.utils.nucleus import get_assets_root_path
from pxr import Sdf
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# Locate assets root folder to load sample
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
exit()
usd_path = assets_root_path + "/Isaac/Samples/ROS/Scenario/carter_warehouse_navigation.usd"
omni.usd.get_context().open_stage(usd_path, None)
# Wait two frames so that stage starts loading
simulation_app.update()
simulation_app.update()
print("Loading stage...")
from omni.isaac.core.utils.stage import is_stage_loading
while is_stage_loading():
simulation_app.update()
print("Loading Complete")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
ros_cameras_graph_path = "/World/Carter_ROS/ROS_Cameras"
# Enabling rgb and depth image publishers for left camera. Cameras will automatically publish images each frame
og.Controller.set(
og.Controller.attribute(ros_cameras_graph_path + "/isaac_create_render_product_left.inputs:enabled"), True
)
# Enabling rgb and depth image publishers for right camera. Cameras will automatically publish images each frame
og.Controller.set(
og.Controller.attribute(ros_cameras_graph_path + "/isaac_create_render_product_right.inputs:enabled"), True
)
simulation_context.play()
simulation_context.step()
# Simulate for one second to warm up sim and let everything settle
for frame in range(60):
simulation_context.step()
# Dock the second camera window
left_viewport = omni.ui.Workspace.get_window("Viewport")
right_viewport = omni.ui.Workspace.get_window("Viewport 2")
if right_viewport is not None and left_viewport is not None:
right_viewport.dock_in(left_viewport, omni.ui.DockPosition.RIGHT)
right_viewport = None
left_viewport = None
import rospy
# Create a rostopic to publish message to spin robot in place
# Note that this is not the system level rospy, but one compiled for omniverse
from geometry_msgs.msg import Twist
rospy.init_node("carter_stereo", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
pub = rospy.Publisher("cmd_vel", Twist, queue_size=10)
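# To observe the command stream from a host terminal (hypothetical invocation,
# assuming a sourced ROS 1 environment):
#   rostopic echo /cmd_vel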
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Publish the ROS Twist message every 2 frames
if frame % 2 == 0:
message = Twist()
message.angular.z = 0.5 # spin in place
pub.publish(message)
if args.test and frame > 120:
break
frame = frame + 1
pub.unregister()
rospy.signal_shutdown("carter_stereo complete")
simulation_context.stop()
simulation_app.close()
| 4,162 | Python | 32.039682 | 112 | 0.760692 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/contact.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": True})
import omni
import omni.kit.commands
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.extensions import enable_extension
from omni.isaac.sensor import _sensor
from pxr import Gf
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# Note that this is not the system level rospy, but one compiled for omniverse
import numpy as np
import rospy
try:
from isaac_tutorials.msg import ContactSensor
except ModuleNotFoundError:
carb.log_error("isaac_tutorials message definition was not found, please source the ros workspace")
simulation_app.close()
exit()
rospy.init_node("contact_sample", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
timeline = omni.timeline.get_timeline_interface()
contact_pub = rospy.Publisher("/contact_report", ContactSensor, queue_size=0)
cs = _sensor.acquire_contact_sensor_interface()
meters_per_unit = 1.0
ros_world = World(stage_units_in_meters=1.0)
# add a cube in the world
cube_path = "/cube"
cube_1 = ros_world.scene.add(
DynamicCuboid(prim_path=cube_path, name="cube_1", position=np.array([0, 0, 1.5]), size=1.0)
)
simulation_app.update()
# Add a plane for cube to collide with
ros_world.scene.add_default_ground_plane()
simulation_app.update()
# pack the raw contact sensor reading into the ContactSensor message format
def format_contact(c_out, contact):
c_out.time = float(contact["time"])
c_out.value = float(contact["value"] * meters_per_unit)
c_out.in_contact = bool(contact["inContact"])
return c_out
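# Illustrative example of the mapping performed above (values are hypothetical):
#   {"time": 1.5, "value": 9.81, "inContact": True} -> ContactSensor(time=1.5, value=9.81, in_contact=True)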
# Setup contact sensor on cube
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateContactSensor",
path="/Contact_Sensor",
parent=cube_path,
min_threshold=0,
max_threshold=100000000,
color=Gf.Vec4f(1, 1, 1, 1),
radius=-1,
sensor_period=1.0 / 60.0,
translation=Gf.Vec3d(0, 0, 0),
)
simulation_app.update()
# initialize the message handle
c_out = ContactSensor()
# start simulation
timeline.play()
for frame in range(10000):
ros_world.step(render=False)
# Get processed contact data
reading = cs.get_sensor_readings(cube_path + "/Contact_Sensor")
if reading.shape[0]:
for r in reading:
print(r)
# pack the raw data into ContactSensor format and publish it
c = format_contact(c_out, r)
contact_pub.publish(c)
# Cleanup
timeline.stop()
contact_pub.unregister()
rospy.signal_shutdown("contact_sample complete")
simulation_app.close()
| 3,408 | Python | 28.136752 | 103 | 0.734742 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/camera_manual.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
CAMERA_STAGE_PATH = "/Camera"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS bridge sample demonstrating the manual loading of stages and manual publishing of images
simulation_app = SimulationApp(CONFIG)
import omni
import omni.graph.core as og
import omni.replicator.core as rep
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom
# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(CAMERA_STAGE_PATH, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(-1, 5, 1))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
{
"graph_path": ROS_CAMERA_GRAPH_PATH,
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
("cameraHelperRgb", "omni.isaac.ros_bridge.ROS1CameraHelper"),
("cameraHelperInfo", "omni.isaac.ros_bridge.ROS1CameraHelper"),
("cameraHelperDepth", "omni.isaac.ros_bridge.ROS1CameraHelper"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "createViewport.inputs:execIn"),
("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"),
("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"),
("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"),
("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
],
keys.SET_VALUES: [
("createViewport.inputs:viewportId", 0),
("cameraHelperRgb.inputs:frameId", "sim_camera"),
("cameraHelperRgb.inputs:topicName", "rgb"),
("cameraHelperRgb.inputs:type", "rgb"),
("cameraHelperInfo.inputs:frameId", "sim_camera"),
("cameraHelperInfo.inputs:topicName", "camera_info"),
("cameraHelperInfo.inputs:type", "camera_info"),
("cameraHelperDepth.inputs:frameId", "sim_camera"),
("cameraHelperDepth.inputs:topicName", "depth"),
("cameraHelperDepth.inputs:type", "depth"),
("setCamera.inputs:cameraPrim", [usdrt.Sdf.Path(CAMERA_STAGE_PATH)]),
],
},
)
# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)
simulation_app.update()
# Use the IsaacSimulationGate step value to block execution on specific frames
SD_GRAPH_PATH = "/Render/PostProcess/SDGPipeline"
viewport_api = get_active_viewport()
if viewport_api is not None:
import omni.syntheticdata._syntheticdata as sd
curr_stage = omni.usd.get_context().get_stage()
# Required for editing the SDGPipeline graph which exists in the Session Layer
with Usd.EditContext(curr_stage, curr_stage.GetSessionLayer()):
# Get name of rendervar for RGB sensor type
rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
# Get path to IsaacSimulationGate node in RGB pipeline
rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_rgb + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
sd.SensorType.DistanceToImagePlane.name
)
# Get path to IsaacSimulationGate node in Depth pipeline
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_depth + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Get path to IsaacSimulationGate node in CameraInfo pipeline
camera_info_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
"PostProcessDispatch" + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
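# Small helper (a sketch, not part of the original sample) equivalent to the direct
# gate-attribute writes used in the loop below; defined here for reference only.
def set_gate_step(gate_path, step):
    og.Controller.attribute(gate_path + ".inputs:step").set(step)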
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
frame = 0
while simulation_app.is_running() and simulation_context.is_playing():
# Run with a fixed step size
simulation_context.step(render=True)
if simulation_context.is_playing():
        # Rotate camera by 0.25 degrees every frame (frame / 4.0)
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
# Set the step value for the simulation gates to zero to stop execution
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(0)
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(0)
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(0)
# Publish the ROS rgb image message every 5 frames
if frame % 5 == 0:
# Enable rgb Branch node to start publishing rgb image
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(1)
# Publish the ROS Depth image message every 60 frames
if frame % 60 == 0:
# Enable depth Branch node to start publishing depth image
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(1)
# Publish the ROS Camera Info message every frame
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(1)
frame = frame + 1
simulation_context.stop()
simulation_app.close()
| 8,384 | Python | 41.563452 | 106 | 0.707896 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/camera_periodic.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
CAMERA_STAGE_PATH = "/Camera"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
simulation_app = SimulationApp(CONFIG)
import omni
import omni.graph.core as og
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom
# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(CAMERA_STAGE_PATH, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(-1, 5, 1))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
{
"graph_path": ROS_CAMERA_GRAPH_PATH,
"evaluator_name": "push",
"pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
},
{
keys.CREATE_NODES: [
("OnTick", "omni.graph.action.OnTick"),
("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"),
("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
("cameraHelperRgb", "omni.isaac.ros_bridge.ROS1CameraHelper"),
("cameraHelperInfo", "omni.isaac.ros_bridge.ROS1CameraHelper"),
("cameraHelperDepth", "omni.isaac.ros_bridge.ROS1CameraHelper"),
],
keys.CONNECT: [
("OnTick.outputs:tick", "createViewport.inputs:execIn"),
("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"),
("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"),
("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"),
("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"),
("getRenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
],
keys.SET_VALUES: [
("createViewport.inputs:viewportId", 0),
("cameraHelperRgb.inputs:frameId", "sim_camera"),
("cameraHelperRgb.inputs:topicName", "rgb"),
("cameraHelperRgb.inputs:type", "rgb"),
("cameraHelperInfo.inputs:frameId", "sim_camera"),
("cameraHelperInfo.inputs:topicName", "camera_info"),
("cameraHelperInfo.inputs:type", "camera_info"),
("cameraHelperDepth.inputs:frameId", "sim_camera"),
("cameraHelperDepth.inputs:topicName", "depth"),
("cameraHelperDepth.inputs:type", "depth"),
("setCamera.inputs:cameraPrim", [usdrt.Sdf.Path(CAMERA_STAGE_PATH)]),
],
},
)
# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)
simulation_app.update()
# Inside the SDGPipeline graph, Isaac Simulation Gate nodes are added to control the execution rate of each of the ROS image and camera info publishers.
# By default the step input of each Isaac Simulation Gate node is set to a value of 1 to execute every frame.
# We can change this value to N for each Isaac Simulation Gate node individually to publish every N number of frames.
viewport_api = get_active_viewport()
if viewport_api is not None:
import omni.syntheticdata._syntheticdata as sd
# Get name of rendervar for RGB sensor type
rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
# Get path to IsaacSimulationGate node in RGB pipeline
rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_rgb + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Get name of rendervar for DistanceToImagePlane sensor type
rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
sd.SensorType.DistanceToImagePlane.name
)
# Get path to IsaacSimulationGate node in Depth pipeline
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
rv_depth + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Get path to IsaacSimulationGate node in CameraInfo pipeline
camera_info_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
"PostProcessDispatch" + "IsaacSimulationGate", viewport_api.get_render_product_path()
)
# Set Rgb execution step to 5 frames
rgb_step_size = 5
# Set Depth execution step to 60 frames
depth_step_size = 60
# Set Camera info execution step to every frame
info_step_size = 1
# Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(rgb_step_size)
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(depth_step_size)
og.Controller.attribute(camera_info_gate_path + ".inputs:step").set(info_step_size)
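    # Hedged arithmetic for the step values above, assuming the default 60 FPS
    # render rate: rgb publishes at 60/5 = 12 Hz, depth at 60/60 = 1 Hz, and
    # camera_info at the full 60 Hz.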
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
if simulation_context.is_playing():
        # Rotate camera by 0.25 degrees every frame (frame / 4.0)
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
frame = frame + 1
simulation_context.stop()
simulation_app.close()
| 7,905 | Python | 41.278075 | 152 | 0.721189 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/camera_noise.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
from omni.isaac.kit import SimulationApp
CAMERA_STAGE_PATH = "/Camera"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
simulation_app = SimulationApp(CONFIG)
import numpy as np
import omni
import omni.graph.core as og
import omni.replicator.core as rep
import omni.syntheticdata._syntheticdata as sd
import warp as wp
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.isaac.core.utils.render_product import set_camera_prim_path
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, Usd, UsdGeom
# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Creating a Camera prim
camera_prim = UsdGeom.Camera(omni.usd.get_context().get_stage().DefinePrim(CAMERA_STAGE_PATH, "Camera"))
xform_api = UsdGeom.XformCommonAPI(camera_prim)
xform_api.SetTranslate(Gf.Vec3d(-1, 5, 1))
xform_api.SetRotate((90, 0, 0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
camera_prim.GetHorizontalApertureAttr().Set(21)
camera_prim.GetVerticalApertureAttr().Set(16)
camera_prim.GetProjectionAttr().Set("perspective")
camera_prim.GetFocalLengthAttr().Set(24)
camera_prim.GetFocusDistanceAttr().Set(400)
simulation_app.update()
# grab our render product and directly set the camera prim
render_product_path = get_active_viewport().get_render_product_path()
set_camera_prim_path(render_product_path, CAMERA_STAGE_PATH)
# GPU Noise Kernel for illustrative purposes, input is rgba, outputs rgb
@wp.kernel
def image_gaussian_noise_warp(
data_in: wp.array3d(dtype=wp.uint8), data_out: wp.array3d(dtype=wp.uint8), seed: int, sigma: float = 0.5
):
i, j = wp.tid()
dim_i = data_out.shape[0]
dim_j = data_out.shape[1]
pixel_id = i * dim_i + j
state_r = wp.rand_init(seed, pixel_id + (dim_i * dim_j * 0))
state_g = wp.rand_init(seed, pixel_id + (dim_i * dim_j * 1))
state_b = wp.rand_init(seed, pixel_id + (dim_i * dim_j * 2))
data_out[i, j, 0] = wp.uint8(float(data_in[i, j, 0]) + (255.0 * sigma * wp.randn(state_r)))
data_out[i, j, 1] = wp.uint8(float(data_in[i, j, 1]) + (255.0 * sigma * wp.randn(state_g)))
data_out[i, j, 2] = wp.uint8(float(data_in[i, j, 2]) + (255.0 * sigma * wp.randn(state_b)))
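# Note: the wp.uint8 casts above wrap out-of-range values rather than saturating;
# a production kernel would typically clamp the noisy result to [0.0, 255.0] first.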
# register a new augmented annotator that adds noise to the rgba data and outputs rgb so the ROS publisher can publish it
rep.annotators.register(
name="rgb_gaussian_noise",
annotator=rep.annotators.augment_compose(
source_annotator=rep.annotators.get("rgb", device="cuda"),
augmentations=[
rep.annotators.Augmentation.from_function(
image_gaussian_noise_warp, sigma=0.1, seed=1234, data_out_shape=(-1, -1, 3)
),
],
),
)
# Create a new writer with the augmented image
rep.writers.register_node_writer(
name=f"CustomROS1PublishImage",
node_type_id="omni.isaac.ros_bridge.ROS1PublishImage",
annotators=[
"rgb_gaussian_noise",
omni.syntheticdata.SyntheticData.NodeConnectionTemplate(
"IsaacReadSimulationTime", attributes_mapping={"outputs:simulationTime": "inputs:timeStamp"}
),
],
category="custom",
)
# Create the new writer and attach to our render product
writer = rep.writers.get(f"CustomROS1PublishImage")
writer.initialize(topicName="rgb_augmented", frameId="sim_camera")
writer.attach([render_product_path])
simulation_app.update()
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
frame = 0
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
if simulation_context.is_playing():
        # Rotate camera by 0.25 degrees every frame (frame / 4.0)
xform_api.SetRotate((90, 0, frame / 4.0), UsdGeom.XformCommonAPI.RotationOrderXYZ)
frame = frame + 1
simulation_context.stop()
simulation_app.close()
| 5,338 | Python | 35.319728 | 115 | 0.729299 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/moveit.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import sys
import carb
import numpy as np
from omni.isaac.kit import SimulationApp
FRANKA_STAGE_PATH = "/Franka"
FRANKA_USD_PATH = "/Isaac/Robots/Franka/franka_alt_fingers.usd"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Room/simple_room.usd"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS bridge sample demonstrating the manual loading of stages
# and creation of ROS components
simulation_app = SimulationApp(CONFIG)
import omni.graph.core as og
import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, prims, rotations, stage, viewports
from pxr import Gf
# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Preparing stage
viewports.set_camera_view(eye=np.array([1.2, 1.2, 0.8]), target=np.array([0, 0, 0.5]))
# Loading the simple_room environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)
# Loading the franka robot USD
prims.create_prim(
FRANKA_STAGE_PATH,
"Xform",
position=np.array([0, -0.64, 0]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(0, 0, 1), 90)),
usd_path=assets_root_path + FRANKA_USD_PATH,
)
simulation_app.update()
# Creating a action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("PublishJointState", "omni.isaac.ros_bridge.ROS1PublishJointState"),
("SubscribeJointState", "omni.isaac.ros_bridge.ROS1SubscribeJointState"),
("ArticulationController", "omni.isaac.core_nodes.IsaacArticulationController"),
("PublishTF", "omni.isaac.ros_bridge.ROS1PublishTransformTree"),
("PublishClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
],
og.Controller.Keys.CONNECT: [
("OnImpulseEvent.outputs:execOut", "PublishJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "SubscribeJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "PublishTF.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "PublishClock.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "ArticulationController.inputs:execIn"),
("ReadSimTime.outputs:simulationTime", "PublishJointState.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishTF.inputs:timeStamp"),
("SubscribeJointState.outputs:jointNames", "ArticulationController.inputs:jointNames"),
("SubscribeJointState.outputs:positionCommand", "ArticulationController.inputs:positionCommand"),
("SubscribeJointState.outputs:velocityCommand", "ArticulationController.inputs:velocityCommand"),
("SubscribeJointState.outputs:effortCommand", "ArticulationController.inputs:effortCommand"),
],
og.Controller.Keys.SET_VALUES: [
# Setting the /Franka target prim to Articulation Controller node
("ArticulationController.inputs:usePath", True),
("ArticulationController.inputs:robotPath", FRANKA_STAGE_PATH),
("PublishJointState.inputs:targetPrim", [usdrt.Sdf.Path(FRANKA_STAGE_PATH)]),
("PublishTF.inputs:targetPrims", [usdrt.Sdf.Path(FRANKA_STAGE_PATH)]),
],
},
)
except Exception as e:
print(e)
simulation_app.update()
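# With this graph in place, a host-side MoveIt / ROS 1 pipeline can drive the arm by
# publishing sensor_msgs/JointState messages to the topic consumed by
# SubscribeJointState (the node's default topic name is assumed here).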
# need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Tick the Publish/Subscribe JointState, Publish TF and Publish Clock nodes each frame
og.Controller.set(og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
simulation_context.stop()
simulation_app.close()
| 5,436 | Python | 40.503816 | 113 | 0.706586 |
2820207922/isaac_ws/standalone_examples/api/omni.isaac.ros_bridge/subscriber.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"renderer": "RayTracedLighting", "headless": False})
import omni
from omni.isaac.core import World
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.core.utils.extensions import enable_extension
# enable ROS bridge extension
enable_extension("omni.isaac.ros_bridge")
simulation_app.update()
# check if rosmaster node is running
# this is to prevent this sample from waiting indefinitely if roscore is not running
# can be removed in regular usage
import rosgraph
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
simulation_app.close()
exit()
# Note that this is not the system level rospy, but one compiled for omniverse
import numpy as np
import rospy
from std_msgs.msg import Empty
class Subscriber:
def __init__(self):
# setting up the world with a cube
self.timeline = omni.timeline.get_timeline_interface()
self.ros_world = World(stage_units_in_meters=1.0)
self.ros_world.scene.add_default_ground_plane()
# add a cube in the world
cube_path = "/cube"
self.ros_world.scene.add(
VisualCuboid(prim_path=cube_path, name="cube_1", position=np.array([0, 0, 10]), size=0.2)
)
self._cube_position = np.array([0, 0, 0])
# setup the ros subscriber here
self.ros_sub = rospy.Subscriber("/move_cube", Empty, self.move_cube_callback, queue_size=10)
self.ros_world.reset()
def move_cube_callback(self, data):
        # callback function to set the cube position to a new one upon receiving an (empty) ROS message
if self.ros_world.is_playing():
self._cube_position = np.array([np.random.rand() * 0.40, np.random.rand() * 0.40, 0.10])
def run_simulation(self):
self.timeline.play()
while simulation_app.is_running():
self.ros_world.step(render=True)
if self.ros_world.is_playing():
if self.ros_world.current_time_step_index == 0:
self.ros_world.reset()
                # the cube pose is actually set here
self.ros_world.scene.get_object("cube_1").set_world_pose(self._cube_position)
# Cleanup
self.ros_sub.unregister()
rospy.signal_shutdown("subscriber example complete")
self.timeline.stop()
simulation_app.close()
if __name__ == "__main__":
rospy.init_node("tutorial_subscriber", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
subscriber = Subscriber()
subscriber.run_simulation()
| 3,108 | Python | 34.329545 | 103 | 0.684685 |
codemaster0407/ICECTCI-Hackathon/README.md |
# ICECTCI-Hackathon
# PROBLEM STATEMENT
Problem Statement 3: Natural Language Processing
Title: AI-Assisted Learning for NVIDIA SDKs and Toolkits
Problem Statement: Develop an AI-powered language model (LLM) that assists users in
understanding and effectively using various NVIDIA SDKs (Software Development Kits) and
toolkits. The objective of this hackathon is to create an interactive and user-friendly platform
that provides comprehensive information, examples, and guidance on NVIDIA's SDKs and
toolkits. By leveraging the power of language models and NVIDIA toolkits, participants aim to
simplify the learning curve for developers and empower them to utilize NVIDIA's technologies
more efficiently.
### Chatbot_final.ipynb
This notebook can be used to run inference on user queries.
### Evaluate_1.ipynb
Notebook to evaluate the fine-tuned models.
### FALCON7B_r32_a64_gen_tot
This directory contains the finetuned Falcon-7B LLM with PEFT adapters.
### pup_gorilla_model
This directory contains the finetuned Gorilla-7B LLM with PEFT adapters.
### DATA_EXTRACTION
This directory contains all the code and extracted data files from different sources.
| 1,197 | Markdown | 35.303029 | 97 | 0.802005 |
codemaster0407/ICECTCI-Hackathon/FALCON7B_r32_a64_gen_tot/README.md | ---
library_name: peft
---
## Training procedure
The following `bitsandbytes` quantization config was used during training:
- load_in_8bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
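
A minimal sketch of loading this adapter for 8-bit inference under the same config. The base model id (`tiiuae/falcon-7b`) is an assumption based on the repository description, not something this file records, and older `transformers` releases may additionally need `trust_remote_code=True`:

```python
# sketch only: the base model id below is an assumption, not recorded in this file
from peft import PeftModel
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
base = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b", quantization_config=bnb, device_map="auto"
)
# attach the LoRA/PEFT adapter stored in this directory
model = PeftModel.from_pretrained(base, "FALCON7B_r32_a64_gen_tot")
```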
### Framework versions
- PEFT 0.5.0
| 491 | Markdown | 20.391303 | 74 | 0.735234 |
codemaster0407/ICECTCI-Hackathon/pup_gorilla_model/README.md | ---
library_name: peft
---
## Training procedure
The following `bitsandbytes` quantization config was used during training:
- load_in_8bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
### Framework versions
- PEFT 0.5.0
| 491 | Markdown | 20.391303 | 74 | 0.735234 |
KhaledSharif/omniverse-gym/setup.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from setuptools import setup, find_packages
INSTALL_REQUIRES = [
"numpy==1.23.5",
"protobuf==3.20.2",
"omegaconf==2.3.0",
"hydra-core==1.3.2",
"urllib3==1.26.16",
"rl-games==1.6.1",
"moviepy==1.0.3"
]
setup(
name="omniisaacgymenvs",
author="[email protected]",
version="1.0.0",
description="Omniverse Isaac Gym Envs for Robot Learning in NVIDIA Isaac Sim",
keywords=["robotics", "rl"],
include_package_data=True,
install_requires=INSTALL_REQUIRES,
packages=find_packages("."),
    classifiers=[
        "Natural Language :: English",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
zip_safe=False,
)
| 765 | Python | 25.413792 | 94 | 0.647059 |
KhaledSharif/omniverse-gym/run.py | import os
import gym
import hydra
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import (
retrieve_checkpoint_path,
get_experience,
)
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
        # `create_rlgpu_env` is the environment construction function that is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register(
"RLGPU",
lambda config_name, num_actors, **kwargs: RLGPUEnv(
config_name, num_actors, **kwargs
),
)
env_configurations.register(
"rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env}
)
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self, module_path, experiment_dir):
self.rlg_config_dict["params"]["config"]["train_dir"] = os.path.join(
module_path, "runs"
)
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
# dump config dict
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run(
{
"train": not self.cfg.test,
"play": self.cfg.test,
"checkpoint": self.cfg.checkpoint,
"sigma": None,
}
)
@hydra.main(version_base=None, config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
headless = cfg.headless
# local rank (GPU id) in a current multi-gpu mode
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# global rank (GPU id) in multi-gpu multi-node mode
global_rank = int(os.getenv("RANK", "0"))
if cfg.multi_gpu:
cfg.device_id = local_rank
cfg.rl_device = f"cuda:{local_rank}"
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
# select kit app file
experience = get_experience(
headless,
cfg.enable_livestream,
enable_viewport,
cfg.enable_recording,
cfg.kit_app,
)
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport or cfg.enable_recording,
experience=experience,
)
# parse experiment directory
module_path = os.path.abspath(os.curdir)
experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name)
# use gym RecordVideo wrapper for viewport recording
if cfg.enable_recording:
if cfg.recording_dir == "":
videos_dir = os.path.join(experiment_dir, "videos")
else:
videos_dir = cfg.recording_dir
video_interval = lambda step: step % cfg.recording_interval == 0
video_length = cfg.recording_length
env.is_vector_env = True
if env.metadata is None:
env.metadata = {
"render_modes": ["rgb_array"],
"render_fps": cfg.recording_fps,
}
else:
env.metadata["render_modes"] = ["rgb_array"]
env.metadata["render_fps"] = cfg.recording_fps
env = gym.wrappers.RecordVideo(
env,
video_folder=videos_dir,
step_trigger=video_interval,
video_length=video_length,
)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = cfg.seed + global_rank if cfg.seed != -1 else cfg.seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
initialize_task(cfg_dict, env)
torch.cuda.set_device(local_rank)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run(module_path, experiment_dir)
env.close()
if __name__ == "__main__":
parse_hydra_configs()
| 5,048 | Python | 32.437086 | 116 | 0.627377 |
KhaledSharif/omniverse-gym/README.md | # omniverse-gym
Examples of how to use NVIDIA Omniverse Isaac Sim to solve Reinforcement Learning tasks with RL Games (rl-games)
## Installation
Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release (2023.1.1)
To install `omniisaacgymenvs`, first clone this repository:
```bash
git clone https://github.com/KhaledSharif/omniverse-gym.git
```
Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`.
To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path. For Linux:
```bash
alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-2023.1.1/python.sh
```
Install the repository and its dependencies:
```bash
PYTHON_PATH -m pip install -e .
```
To run a simple form of PPO from `rl_games`, use the single-threaded training script:
```bash
PYTHON_PATH run.py task=Cartpole
```
The result is saved to the current working directory in a new directory called `runs`.
You can now evaluate your model by running the same environment in test (inference) mode using the saved model checkpoint.
```bash
PYTHON_PATH run.py task=Cartpole test=True checkpoint=runs/Cartpole/nn/Cartpole.pth
```
| 1,491 | Markdown | 35.390243 | 226 | 0.768612 |
KhaledSharif/omniverse-gym/cfg/config.yaml |
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# evaluate checkpoint
evaluation: False
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 300
# enables viewport recording
enable_recording: False
# interval between video recordings (in steps)
recording_interval: 2000
# length of the recorded video (in steps)
recording_length: 100
# fps for writing recorded video
recording_fps: 30
# directory to save recordings in
recording_dir: ''
wandb_activate: False
wandb_group: ''
wandb_name: ${train.params.config.name}
wandb_entity: ''
wandb_project: 'omniisaacgymenvs'
# path to a kit app file
kit_app: ''
# Warp
warp: False
# set default task and default training config based on task
defaults:
- _self_
- task: Cartpole
- train: ${task}PPO
- override hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
| 2,007 | YAML | 22.348837 | 103 | 0.744893 |
KhaledSharif/omniverse-gym/cfg/README.md | ## Reinforcement Learning Configuration
### What is Hydra?
Hydra is an open-source Python framework that simplifies the development of research and other complex applications. The key feature is the ability to dynamically create a hierarchical configuration by composition and override it through config files and the command line.
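
As a point of reference, here is a minimal sketch of the Hydra entry-point pattern `run.py` uses; the decorator arguments match `run.py`, while the function body is illustrative only:

```python
import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(version_base=None, config_name="config", config_path="./cfg")
def main(cfg: DictConfig):
    # cfg is the composed hierarchy: config.yaml plus the selected task/train files
    print(OmegaConf.to_yaml(cfg))

if __name__ == "__main__":
    main()
```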
### What is ./config.yaml?
- Task Configuration: This section specifies the task name, experiment name, the number of environments to use for training, the random seed, and whether to use deterministic PyTorch operations.
- Device Configuration: This section configures the physics engine (PhysX), the pipeline (CPU or GPU), the device to be used for simulation (CPU or GPU), the device for running the RL algorithm, and whether to enable multi-GPU training.
- PhysX Arguments: This section sets the number of worker threads and the solver type for the PhysX physics engine.
- RL Training Arguments: These arguments control various aspects of the RL training process, such as running in test mode, loading a checkpoint, evaluation mode, headless rendering, live streaming, timeout settings, recording settings (e.g., interval, length, FPS, directory), and wandb (Weights & Biases) integration for logging and monitoring.
- Default Settings: This section sets the default task and training configuration based on the specified task (in this case, Cartpole).
- Hydra Configuration: This section configures the output directory for the training logs and results using the Hydra configuration management framework.
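
Each of these keys can be overridden from the command line (for example `PYTHON_PATH run.py task=Ant headless=True num_envs=64`). The same composition is available in code through Hydra's compose API; the sketch below assumes the repository's custom OmegaConf resolvers (e.g. `${resolve_default:...}`) are registered by importing its `hydra_utils` module first:

```python
import omniisaacgymenvs.utils.hydra_cfg.hydra_utils  # noqa: F401 -- registers custom resolvers
from hydra import compose, initialize

with initialize(version_base=None, config_path="cfg"):  # path relative to this file
    cfg = compose(config_name="config",
                  overrides=["task=Cartpole", "num_envs=64", "headless=True"])
print(cfg.task_name, cfg.headless)  # Cartpole True
```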
### What is ./task/*.yaml?
- Environment Settings: This section defines the number of parallel environments, episode length, observation and action clipping, control frequency, noise in initial conditions, number of props, aggregation mode, and reward scales for different objectives (e.g., distance, rotation, finger positions).
- Simulation Settings: This section configures the simulation parameters, such as time step, gravity, ground plane, lighting, fabric usage, and whether to use GPU acceleration. It also sets the default physics material properties (friction, restitution).
- Physics Engine Settings: These settings are specific to the PhysX physics engine, including worker thread count, solver type, GPU usage, solver iteration counts, contact offsets, bounce thresholds, friction parameters, sleeping and stabilization settings, and GPU buffer capacities.
- Object-Specific Settings: These sections override specific parameters for individual objects or actors within the environment, such as the robot arm (e.g., Franka), cabinets, and props. These settings include enabling self-collisions, gyroscopic forces, solver iteration counts, sleep and stabilization thresholds, density, maximum depenetration velocity, and shape-specific parameters like contact and rest offsets.
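
A task file can also be inspected on its own; note that interpolations such as `${resolve_default:...}` and `${..physics_engine}` only resolve inside the fully composed config, so a standalone load should stick to plain keys:

```python
from omegaconf import OmegaConf

task_cfg = OmegaConf.load("cfg/task/Cartpole.yaml")
print(task_cfg.env.resetDist, task_cfg.env.maxEffort)      # 3.0 400.0
print(task_cfg.sim.physx.solver_position_iteration_count)  # 4
```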
### What is ./train/*.yaml?
Params: This section contains the main parameters for the RL algorithm and neural network architecture.
- seed: Random seed value for reproducibility.
- algo: The algorithm to be used, in this case, a2c_continuous (Advantage Actor-Critic for continuous actions).
- model: The model type, typically continuous_a2c_logstd for continuous action spaces.
- network: Configuration for the neural network architecture, including the type (actor-critic), activation functions, initialization methods, and layer sizes.
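
A minimal sketch of how this `params` tree reaches rl-games, following `run.py` above; it assumes the environment has already been registered with `vecenv.register` and `env_configurations.register` as `run.py` does:

```python
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict
from rl_games.torch_runner import Runner

rlg_config_dict = omegaconf_to_dict(cfg.train)  # cfg composed by @hydra.main
runner = Runner()
runner.load(rlg_config_dict)  # consumes the params tree described above
runner.reset()
runner.run({"train": True, "play": False, "checkpoint": "", "sigma": None})
```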
Load Checkpoint: Parameters related to loading a pre-trained model checkpoint.
- load_checkpoint: A flag to determine whether to load a checkpoint or not.
- load_path: The path to the checkpoint file to be loaded.
Config: This section contains various configuration settings for the training process.
- name: The name of the experiment or environment.
- full_experiment_name: The full name of the experiment.
- env_name: The name of the environment to be used (in this case, rlgpu).
- device: The device to be used for training (e.g., CPU or GPU).
- multi_gpu: A flag to enable multi-GPU training.
- ppo: A flag to indicate that PPO is being used.
- mixed_precision: A flag to enable mixed-precision training (useful for GPU acceleration).
- normalize_input, normalize_value, normalize_advantage: Flags for normalizing input, value, and advantage estimates.
- num_actors: The number of parallel environments to run.
- reward_shaper: Configuration for reward scaling.
- gamma, tau: Discount factors for future rewards.
- learning_rate, lr_schedule: Learning rate and its scheduling strategy.
- kl_threshold: The KL divergence threshold for adaptive KL penalty in PPO.
- score_to_win: The target score to consider the task as solved.
- max_epochs, save_best_after, save_frequency: Parameters for training duration and checkpointing.
- grad_norm, entropy_coef, truncate_grads, e_clip: Gradient-related parameters and entropy regularization.
- horizon_length, minibatch_size, mini_epochs: Parameters for batching and optimization.
- critic_coef, clip_value, seq_length, bounds_loss_coef: Additional parameters for the critic and bounding loss.
| 5,022 | Markdown | 85.603447 | 418 | 0.797093 |
KhaledSharif/omniverse-gym/cfg/task/CartpoleCamera.yaml | defaults:
- Cartpole
- _self_
name: CartpoleCamera
env:
numEnvs: ${resolve_default:32,${...num_envs}}
envSpacing: 20.0
cameraWidth: 240
cameraHeight: 160
exportImages: False
sim:
rendering_dt: 0.0166 # 1/60
# set to True if you use camera sensors in the environment
enable_cameras: True
add_ground_plane: False
add_distant_light: True
| 363 | YAML | 16.333333 | 60 | 0.69697 |
KhaledSharif/omniverse-gym/cfg/task/FrankaDeformable.yaml | # used to create the object
name: FrankaDeformable
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:1024,${...num_envs}} # 2048#4096
envSpacing: 3.0
episodeLength: 100 # 150 #350 #500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 4
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
sim:
dt: 0.016 # 1/60s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 8 # 12
solver_velocity_iteration_count: 0 # 1
contact_offset: 0.02 #0.005
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 524288 #20965884
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 4194304 #2097152 #16777216 #8388608 #2097152 #1048576
gpu_max_particle_contacts: 1048576 #2097152 #1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
franka:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
beaker:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
cube:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
# # per-shape
# contact_offset: 0.02
# rest_offset: 0.001
| 3,418 | YAML | 25.92126 | 85 | 0.691925 |
KhaledSharif/omniverse-gym/cfg/task/FrankaCabinet.yaml | # used to create the object
name: FrankaCabinet
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 3.0
episodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 4
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 524288
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
franka:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
cabinet:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.0
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
prop:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 100
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
| 3,287 | YAML | 25.304 | 71 | 0.695467 |
KhaledSharif/omniverse-gym/cfg/task/Ant.yaml | # used to create the object
name: Ant
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
# numEnvs: ${...num_envs}
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 0.5
controlFrequencyInv: 2 # 60 Hz
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.005
energyCost: 0.05
dofVelocityScale: 0.2
angularVelocityScale: 1.0
contactForceScale: 0.1
jointsAtLimitCost: 0.1
deathCost: -2.0
terminationHeight: 0.31
alive_reward_scale: 0.5
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 10.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Ant:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
    max_depenetration_velocity: 10.0
| 2,370 | YAML | 24.771739 | 71 | 0.690717 |
KhaledSharif/omniverse-gym/cfg/task/AnymalTerrain.yaml | name: AnymalTerrain
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:2048,${...num_envs}}
numObservations: 188
numActions: 12
envSpacing: 3. # [m]
terrain:
staticFriction: 1.0 # [-]
dynamicFriction: 1.0 # [-]
restitution: 0. # [-]
# rough terrain only:
curriculum: true
maxInitMapLevel: 0
mapLength: 8.
mapWidth: 8.
numLevels: 10
numTerrains: 20
# terrain types: [smooth slope, rough slope, stairs up, stairs down, discrete]
terrainProportions: [0.1, 0.1, 0.35, 0.25, 0.2]
# tri mesh only:
slopeTreshold: 0.5
baseInitState:
pos: [0.0, 0.0, 0.62] # x,y,z [m]
rot: [1.0, 0.0, 0.0, 0.0] # w,x,y,z [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
randomCommandVelocityRanges:
# train
linear_x: [-1., 1.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-3.14, 3.14] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 80.0 # [N*m/rad]
damping: 2.0 # [N*m*s/rad]
# action scale: target angle = actionScale * action + defaultAngle
actionScale: 0.5
# decimation: Number of control action updates @ sim DT per policy DT
decimation: 4
defaultJointAngles: # = target angles when action = 0.0
LF_HAA: 0.03 # [rad]
LH_HAA: 0.03 # [rad]
RF_HAA: -0.03 # [rad]
RH_HAA: -0.03 # [rad]
LF_HFE: 0.4 # [rad]
LH_HFE: -0.4 # [rad]
RF_HFE: 0.4 # [rad]
RH_HFE: -0.4 # [rad]
LF_KFE: -0.8 # [rad]
LH_KFE: 0.8 # [rad]
RF_KFE: -0.8 # [rad]
RH_KFE: 0.8 # [rad]
learn:
# rewards
terminalReward: 0.0
linearVelocityXYRewardScale: 1.0
linearVelocityZRewardScale: -4.0
angularVelocityXYRewardScale: -0.05
angularVelocityZRewardScale: 0.5
orientationRewardScale: -0.
torqueRewardScale: -0.00002
jointAccRewardScale: -0.0005
baseHeightRewardScale: -0.0
actionRateRewardScale: -0.01
fallenOverRewardScale: -1.0
# cosmetics
hipRewardScale: -0. #25
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
heightMeasurementScale: 5.0
# noise
addNoise: true
noiseLevel: 1.0 # scales other values
dofPositionNoise: 0.01
dofVelocityNoise: 1.5
linearVelocityNoise: 0.1
angularVelocityNoise: 0.2
gravityNoise: 0.05
heightMeasurementNoise: 0.06
#randomization
pushInterval_s: 15
# episode length in seconds
episodeLength_s: 20
sim:
dt: 0.005
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: False
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: True
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 163840
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 134217728
gpu_temp_buffer_capacity: 33554432
gpu_max_num_partitions: 8
anymal:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: False
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
| 4,346 | YAML | 25.345454 | 82 | 0.633916 |
KhaledSharif/omniverse-gym/cfg/task/BallBalance.yaml | # used to create the object
name: BallBalance
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.0
maxEpisodeLength: 600
actionSpeedScale: 20
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 262144
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 262144
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
table:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 200
max_depenetration_velocity: 1000.0
| 2,458 | YAML | 25.728261 | 71 | 0.690806 |
KhaledSharif/omniverse-gym/cfg/task/FactoryBase.yaml | # See schema in factory_schema_config_base.py for descriptions of parameters.
defaults:
- _self_
- /factory_schema_config_base
sim:
add_damping: True
disable_contact_processing: False
env:
env_spacing: 1.5
franka_depth: 0.5
table_height: 0.4
franka_friction: 1.0
table_friction: 0.3
| 309 | YAML | 16.222221 | 77 | 0.699029 |
KhaledSharif/omniverse-gym/cfg/task/Humanoid.yaml | # used to create the object
name: Humanoid
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
# numEnvs: ${...num_envs}
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 1.0
controlFrequencyInv: 2 # 60 Hz
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.01
energyCost: 0.05
dofVelocityScale: 0.1
angularVelocityScale: 0.25
contactForceScale: 0.01
jointsAtLimitCost: 0.25
deathCost: -1.0
terminationHeight: 0.8
alive_reward_scale: 2.0
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 10.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Humanoid:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 10.0
| 2,335 | YAML | 24.670329 | 71 | 0.693362 |
KhaledSharif/omniverse-gym/cfg/task/AllegroHand.yaml | # used to create the object
name: AllegroHand
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
envSpacing: 0.75
episodeLength: 600
clipObservations: 5.0
clipActions: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
actionsMovingAverage: 1.0
controlFrequencyInv: 4 # 30 Hz
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.2
resetDofVelRandomInterval: 0.0
# reward -> dictionary
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0002
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: 0.0
velObsScale: 0.2
objectType: "block"
observationType: "full" # can be "full_no_vel", "full"
successTolerance: 0.1
printNumSuccesses: False
maxConsecutiveSuccesses: 0
sim:
dt: 0.0083 # 1/120 s
add_ground_plane: True
add_distant_light: False
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
# per-scene
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type} # 0: PGS, 1: TGS
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 819200
gpu_found_lost_aggregate_pairs_capacity: 819200
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
allegro_hand:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: False
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.0005
# per-body
density: -1
max_depenetration_velocity: 1000.0
object:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.0025
# per-body
density: 400.0
max_depenetration_velocity: 1000.0
goal_object:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 8
solver_velocity_iteration_count: 0
sleep_threshold: 0.000
stabilization_threshold: 0.0025
# per-body
density: -1
max_depenetration_velocity: 1000.0
| 3,360 | YAML | 25.464567 | 71 | 0.69881 |
KhaledSharif/omniverse-gym/cfg/task/HumanoidSAC.yaml | # used to create the object
defaults:
- Humanoid
- _self_
# if given, will override the device setting in gym.
env:
  numEnvs: ${resolve_default:64,${...num_envs}}
| 168 | YAML | 20.124997 | 52 | 0.678571 |
KhaledSharif/omniverse-gym/cfg/task/Ingenuity.yaml | # used to create the object
name: Ingenuity
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.5
maxEpisodeLength: 2000
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -3.721]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
# set to True if you use camera sensors in the environment
enable_cameras: False
disable_contact_processing: False
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: False
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
ingenuity:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
    max_depenetration_velocity: 1000.0
| 2,351 | YAML | 27 | 71 | 0.693322 |
KhaledSharif/omniverse-gym/cfg/task/Quadcopter.yaml | # used to create the object
name: Quadcopter
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 1.25
maxEpisodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 8192
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
copter:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
| 2,452 | YAML | 25.663043 | 71 | 0.690457 |
KhaledSharif/omniverse-gym/cfg/task/Crazyflie.yaml | # used to create the object
name: Crazyflie
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.5
maxEpisodeLength: 700
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
# set to True if you use camera sensors in the environment
enable_cameras: False
disable_contact_processing: False
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: False
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
crazyflie:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: True
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 6
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
| 2,350 | YAML | 26.658823 | 71 | 0.692766 |
KhaledSharif/omniverse-gym/cfg/task/FactoryEnvNutBolt.yaml | # See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
disable_nut_collisions: False
disable_bolt_collisions: False
disable_contact_processing: False
env:
env_name: 'FactoryEnvNutBolt'
desired_subassemblies: ['nut_bolt_m16', 'nut_bolt_m16']
nut_lateral_offset: 0.1 # Y-axis offset of nut before initial reset to prevent initial interpenetration with bolt
nut_bolt_density: 7850.0
nut_bolt_friction: 0.3
# Subassembly options:
# {nut_bolt_m4, nut_bolt_m8, nut_bolt_m12, nut_bolt_m16, nut_bolt_m20}
| 643 | YAML | 28.272726 | 116 | 0.73717 |
KhaledSharif/omniverse-gym/cfg/task/AntSAC.yaml | # used to create the object
defaults:
- Ant
- _self_
# if given, will override the device setting in gym.
env:
  numEnvs: ${resolve_default:64,${...num_envs}}
| 163 | YAML | 19.499998 | 52 | 0.668712 |
KhaledSharif/omniverse-gym/cfg/task/Cartpole.yaml | # used to create the object
name: Cartpole
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:512,${...num_envs}}
envSpacing: 4.0
resetDist: 3.0
maxEffort: 400.0
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 1024
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1024
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Cartpole:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
# per-shape
contact_offset: 0.02
    rest_offset: 0.001
| 2,124 | YAML | 26.243589 | 71 | 0.686911 |
KhaledSharif/omniverse-gym/cfg/task/Anymal.yaml | # used to create the object
name: Anymal
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 4. # [m]
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2
baseInitState:
pos: [0.0, 0.0, 0.62] # x,y,z [m]
rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
randomCommandVelocityRanges:
linear_x: [-2., 2.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-1., 1.] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 85.0 # [N*m/rad]
damping: 2.0 # [N*m*s/rad]
actionScale: 13.5
defaultJointAngles: # = target angles when action = 0.0
LF_HAA: 0.03 # [rad]
LH_HAA: 0.03 # [rad]
RF_HAA: -0.03 # [rad]
RH_HAA: -0.03 # [rad]
LF_HFE: 0.4 # [rad]
LH_HFE: -0.4 # [rad]
RF_HFE: 0.4 # [rad]
RH_HFE: -0.4 # [rad]
LF_KFE: -0.8 # [rad]
LH_KFE: 0.8 # [rad]
RF_KFE: -0.8 # [rad]
RH_KFE: 0.8 # [rad]
learn:
# rewards
linearVelocityXYRewardScale: 1.0
angularVelocityZRewardScale: 0.5
linearVelocityZRewardScale: -0.03
jointAccRewardScale: -0.0003
actionRateRewardScale: -0.006
cosmeticRewardScale: -0.06
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
# episode length in seconds
episodeLength_s: 50
sim:
dt: 0.01
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: False
use_fabric: True
enable_scene_query_support: False
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 1
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 163840
gpu_found_lost_pairs_capacity: 4194304
gpu_found_lost_aggregate_pairs_capacity: 33554432
gpu_total_aggregate_pairs_capacity: 4194304
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 134217728
gpu_temp_buffer_capacity: 33554432
gpu_max_num_partitions: 8
Anymal:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
| 3,270 | YAML | 24.960317 | 71 | 0.626911 |
KhaledSharif/omniverse-gym/cfg/task/ShadowHandOpenAI_LSTM.yaml | # specifies what the config is when running `ShadowHandOpenAI` in LSTM mode
defaults:
- ShadowHandOpenAI_FF
- _self_
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
| 178 | YAML | 18.888887 | 75 | 0.707865 |
KhaledSharif/omniverse-gym/cfg/train/ShadowHandOpenAI_FFPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 400, 200, 100]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHandOpenAI_FF,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 32864
mini_epochs: 4
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
player:
deterministic: True
games_num: 100000
print_stats: True
| 2,215 | YAML | 20.940594 | 66 | 0.577427 |
KhaledSharif/omniverse-gym/cfg/train/AnymalTerrainPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0. # std = 1.
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
# rnn:
# name: lstm
# units: 128
# layers: 1
# before_mlp: True
# concat_input: True
# layer_norm: False
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AnymalTerrain,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False # True
normalize_input: True
normalize_value: True
normalize_advantage: True
value_bootstrap: True
clip_actions: False
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
gamma: 0.99
tau: 0.95
e_clip: 0.2
entropy_coef: 0.001
learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
lr_schedule: adaptive
kl_threshold: 0.008 # target kl for adaptive lr
truncate_grads: True
grad_norm: 1.
horizon_length: 48
minibatch_size: 16384
mini_epochs: 5
critic_coef: 2
clip_value: True
seq_length: 4 # only for rnn
bounds_loss_coef: 0.
max_epochs: ${resolve_default:2000,${....max_iterations}}
save_best_after: 100
score_to_win: 20000
save_frequency: 50
print_stats: True
| 1,928 | YAML | 21.694117 | 101 | 0.592842 |
KhaledSharif/omniverse-gym/cfg/train/HumanoidPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 200, 100]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Humanoid,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 100
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 32
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,639 | YAML | 21.465753 | 101 | 0.594875 |
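Here `gamma` is the discount factor and `tau` the GAE lambda. A self-contained sketch of generalized advantage estimation over a rollout of `horizon_length` steps, with assumed tensor shapes:

import torch

def gae_advantages(rewards, values, dones, last_value, gamma=0.99, tau=0.95):
    # rewards, values, dones: [horizon, num_actors]; last_value: [num_actors]
    horizon = rewards.shape[0]
    advantages = torch.zeros_like(rewards)
    last_gae = torch.zeros_like(last_value)
    for t in reversed(range(horizon)):
        next_value = last_value if t == horizon - 1 else values[t + 1]
        not_done = 1.0 - dones[t]
        delta = rewards[t] + gamma * next_value * not_done - values[t]
        last_gae = delta + gamma * tau * not_done * last_gae
        advantages[t] = last_gae
    return advantages  # value targets are advantages + values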
KhaledSharif/omniverse-gym/cfg/train/CrazyfliePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: tanh
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Crazyflie,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,614 | YAML | 21.430555 | 101 | 0.593556 |
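`e_clip: 0.2` is the PPO clipping radius. A minimal sketch of the clipped surrogate actor loss it controls (standard PPO, not this repo's exact code):

import torch

def ppo_actor_loss(old_log_probs, log_probs, advantages, e_clip=0.2):
    ratio = torch.exp(log_probs - old_log_probs)          # pi_new / pi_old
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - e_clip, 1.0 + e_clip) * advantages
    return -torch.min(unclipped, clipped).mean()          # maximize the surrogate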
KhaledSharif/omniverse-gym/cfg/train/ShadowHandPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHand,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
player:
deterministic: True
games_num: 100000
print_stats: True
| 1,703 | YAML | 20.56962 | 62 | 0.589548 |
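`value_bootstrap: True` changes how episode timeouts are credited: on truncation the critic's estimate is added back into the reward, so hitting a time limit is not treated as a true terminal state. A sketch with assumed names; rl_games drives this from a time-outs flag reported by the env:

import torch

def bootstrap_timeouts(rewards, values, time_outs, gamma=0.99):
    # time_outs: 1.0 where the episode ended by time limit, else 0.0
    return rewards + gamma * values * time_outs.float()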
KhaledSharif/omniverse-gym/cfg/train/HumanoidSAC.yaml | params:
seed: ${...seed}
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:HumanoidSAC,${....experiment}}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
normalize_input: True
reward_shaper:
scale_value: 1.0
max_epochs: ${resolve_default:50000,${....max_iterations}}
num_steps_per_episode: 8
save_best_after: 100
save_frequency: 1000
gamma: 0.99
init_alpha: 1.0
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
    learnable_temperature: True
num_seed_steps: 5
num_warmup_steps: 10
replay_buffer_size: 1000000
num_actors: ${....task.env.numEnvs}
| 1,165 | YAML | 21.423077 | 101 | 0.603433 |
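With `learnable_temperature: True`, the SAC entropy coefficient alpha is itself optimized: `init_alpha` seeds it and `alpha_lr` drives a small Adam loop that pushes policy entropy toward a target (commonly minus the action dimension). A hedged sketch; the names are assumptions:

import torch

log_alpha = torch.zeros(1, requires_grad=True)      # exp(0) = init_alpha = 1.0
alpha_opt = torch.optim.Adam([log_alpha], lr=5e-3)  # alpha_lr from the config

def update_alpha(log_probs, target_entropy):
    # Raise alpha when entropy is below target, lower it when above.
    alpha_loss = -(log_alpha.exp() * (log_probs + target_entropy).detach()).mean()
    alpha_opt.zero_grad()
    alpha_loss.backward()
    alpha_opt.step()
    return log_alpha.exp().item()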
KhaledSharif/omniverse-gym/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:ShadowHandOpenAI_LSTM,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 32768
mini_epochs: 4
learning_rate: 1e-4
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
zero_rnn_on_done: False
player:
deterministic: True
games_num: 100000
print_stats: True
| 2,402 | YAML | 20.265487 | 68 | 0.562448 |
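The `rnn` block above places a layer-normalized LSTM (`units: 1024`, `before_mlp: True`) in front of the `[512]` relu MLP, and `seq_length: 4` sets the truncated-BPTT window. A hedged torch sketch of that layout; rl_games' internal module structure differs, and `obs_dim`/`act_dim` are placeholders:

import torch
import torch.nn as nn

class LSTMBeforeMLP(nn.Module):
    def __init__(self, obs_dim, act_dim, rnn_units=1024, mlp_units=512):
        super().__init__()
        self.lstm = nn.LSTM(obs_dim, rnn_units, num_layers=1, batch_first=True)
        self.norm = nn.LayerNorm(rnn_units)                 # layer_norm: True
        self.mlp = nn.Sequential(nn.Linear(rnn_units, mlp_units), nn.ReLU())
        self.mu = nn.Linear(mlp_units, act_dim)             # action-mean head

    def forward(self, obs_seq, hidden=None):
        out, hidden = self.lstm(obs_seq, hidden)            # [batch, seq, rnn_units]
        out = self.mlp(self.norm(out))
        return self.mu(out), hidden                         # carry hidden across chunks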
KhaledSharif/omniverse-gym/cfg/train/IngenuityPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ingenuity,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,612 | YAML | 21.402777 | 101 | 0.593052 |
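The `${resolve_default:...}` and `${if:...}` interpolations used throughout these files are custom OmegaConf resolvers registered by the launch script, not built-ins. Plausible minimal definitions (the exact ones live in the repo's config utilities, so treat these as assumptions):

from omegaconf import OmegaConf

OmegaConf.register_new_resolver(
    "resolve_default", lambda default, arg: default if arg in ("", None) else arg
)
OmegaConf.register_new_resolver(
    "if", lambda cond, a, b: a if cond else b
)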
KhaledSharif/omniverse-gym/cfg/train/QuadcopterPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Quadcopter,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,613 | YAML | 21.416666 | 101 | 0.593304 |
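`bounds_loss_coef` weights a penalty that keeps the policy mean near the normalized action range. A sketch mirroring the rl_games-style bound loss; the 1.1 soft bound is an assumption here:

import torch

def bounds_loss(mu, soft_bound=1.1):
    high = torch.clamp(mu - soft_bound, min=0.0) ** 2   # penalize mu > +1.1
    low = torch.clamp(mu + soft_bound, max=0.0) ** 2    # penalize mu < -1.1
    return (high + low).sum(dim=-1)                     # scaled by bounds_loss_coef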
KhaledSharif/omniverse-gym/cfg/train/FactoryTaskNutBoltScrewPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltScrew,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 512
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,597 | YAML | 20.594594 | 70 | 0.594865 |
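In rl_games PPO the rollout batch is `horizon_length * num_actors`, and `minibatch_size` must divide it evenly or training asserts at startup. A quick sanity check one can run against any of these configs; the env count below is illustrative:

def check_batch(horizon_length, num_envs, minibatch_size):
    batch_size = horizon_length * num_envs
    assert batch_size % minibatch_size == 0, (
        f"minibatch_size {minibatch_size} must divide batch size {batch_size}")
    return batch_size // minibatch_size  # minibatches per mini-epoch

print(check_batch(horizon_length=512, num_envs=128, minibatch_size=512))  # -> 128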
KhaledSharif/omniverse-gym/cfg/train/BallBalancePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:BallBalance,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:250,${....max_iterations}}
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,593 | YAML | 21.450704 | 101 | 0.593848 |
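`normalize_advantage: True` standardizes advantages over each update batch before the clipped objective, which stabilizes the scale of the policy gradient. A one-function sketch:

import torch

def normalize_advantage(adv: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    return (adv - adv.mean()) / (adv.std() + eps)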
KhaledSharif/omniverse-gym/cfg/train/FrankaDeformablePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FrankaDeformable,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 100000000
max_epochs: ${resolve_default:6000,${....max_iterations}}
save_best_after: 500
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
    minibatch_size: 16384 # alternatives: 2048, 4096, 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,665 | YAML | 22.138889 | 101 | 0.600601 |
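`truncate_grads: True` with `grad_norm: 1.0` corresponds to global gradient-norm clipping before each optimizer step. A minimal sketch with assumed names:

import torch

def step_with_clipping(optimizer, model, loss, grad_norm=1.0):
    optimizer.zero_grad()
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm)  # truncate_grads
    optimizer.step()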
KhaledSharif/omniverse-gym/cfg/train/FactoryTaskNutBoltPlacePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltPlace,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:400,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,597 | YAML | 20.594594 | 70 | 0.594865 |
KhaledSharif/omniverse-gym/cfg/train/CartpoleCameraPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: None
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: default
# rnn:
# name: lstm
# units: 128
# layers: 1
# before_mlp: False
# concat_input: True
# layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:CartpoleCamera,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: False
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0 #0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 50
save_frequency: 10
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 256
minibatch_size: 512 #1024
mini_epochs: 4
critic_coef: 2
clip_value: True
seq_length: 4
    bounds_loss_coef: 0.0001
| 2,124 | YAML | 21.135416 | 101 | 0.556026 |
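The `cnn` section above is a Nature-DQN-style encoder: three conv layers (32x8s4, 64x4s2, 64x3s1) feeding the `[512]` MLP. A hedged torch equivalent; the input is assumed to be an [N, C, H, W] camera tensor, and the flattened width feeding the MLP depends on the env's image resolution:

import torch
import torch.nn as nn

class CartpoleCameraEncoder(nn.Module):
    def __init__(self, in_channels=3):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=8, stride=4), nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU(),
        )

    def forward(self, x):
        return torch.flatten(self.conv(x), start_dim=1)  # -> [N, features] for the MLP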
KhaledSharif/omniverse-gym/cfg/train/AntPPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ant,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
device: ${....rl_device}
device_name: ${....rl_device}
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 4
critic_coef: 2
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
| 1,657 | YAML | 21.405405 | 101 | 0.594448 |
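`mixed_precision: True` corresponds to running the PPO update under autocast with a gradient scaler; unscaling before clipping keeps `grad_norm` meaningful. A sketch with assumed names:

import torch

scaler = torch.cuda.amp.GradScaler(enabled=True)

def amp_update(optimizer, model, compute_loss, grad_norm=1.0):
    with torch.cuda.amp.autocast(enabled=True):
        loss = compute_loss()
    optimizer.zero_grad()
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)  # expose true gradients before clipping
    torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm)
    scaler.step(optimizer)
    scaler.update()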